Example #1
def get_meshes(seed, galaxies=False):
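    # Build matter-field feature meshes and FOF-halo target meshes for one seed.
    # path, ftypefpm, ftype, bs, nc, ncf, step, stepf, kk, kny, R1, R2 and num are
    # assumed to be module-level globals of the surrounding script.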
    mesh = {}
    mesh['s'] = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) +
                                  'mesh/s/')
    partp = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) +
                              'dynamic/1/Position/')
    mesh['cic'] = tools.paintcic(partp, bs, nc)
    mesh['cicovd'] = mesh['cic'] / mesh['cic'].mean() - 1
    mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
    mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
    mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
    mesh['GD'] = mesh['R1'] - mesh['R2']

    hmesh = {}
    hposall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                'FOF/PeakPosition/')[1:]
    massall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                'FOF/Mass/')[1:].reshape(-1) * 1e10
    hposd = hposall[:num].copy()
    massd = massall[:num].copy()
    print(massd[-1] / 1e10)
    hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
    hmesh['pnn'] = tools.paintnn(hposd, bs, nc)
    hmesh['mnn'] = tools.paintnn(hposd, bs, nc, massd)
    hmesh['mcic'] = tools.paintcic(hposd, bs, nc, massd)
    hmesh['mcicovd'] = (hmesh['mcic'] -
                        hmesh['mcic'].mean()) / hmesh['mcic'].mean()
    data = hmesh['mcicovd']
    print(data.min(), data.max(), data.mean(), data.std())

    return mesh, hmesh
Example #2
def getgalmesh(bs,
               nc,
               seed,
               step=5,
               ncf=512,
               stepf=40,
               masswt=False,
               gridding='nn',
               path=None):
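    # Paint the galaxy catalog of one seed onto an nc^3 mesh, split into centrals,
    # satellites and all galaxies, with NN or CIC gridding and optional mass weights.
    # package_path, np and tools are assumed to be imported at module level.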

    if path is None: path = package_path + '/../../data/z00/'
    ftype = 'L%04d_N%04d_S%04d_%02dstep/'

    hpath = path + ftype % (bs, ncf, seed, stepf) + 'galaxies_n05/galcat/'
    hposd = tools.readbigfile(hpath + 'Position/')
    massd = tools.readbigfile(hpath + 'Mass/').reshape(-1) * 1e10
    galtype = tools.readbigfile(hpath + 'gal_type/').reshape(-1).astype(bool)
    if masswt: mass = massd
    else: mass = np.ones_like(massd)

    if gridding == 'nn':
        gmesh = tools.paintnn(hposd, bs, nc, mass=mass)
        satmesh = tools.paintnn(hposd[galtype], bs, nc, mass=mass[galtype])
        cenmesh = tools.paintnn(hposd[~galtype], bs, nc, mass=mass[~galtype])
    else:
        gmesh = tools.paintcic(hposd, bs, nc, mass=mass)
        satmesh = tools.paintcic(hposd[galtype], bs, nc, mass=mass[galtype])
        cenmesh = tools.paintcic(hposd[~galtype], bs, nc, mass=mass[~galtype])
    return cenmesh, satmesh, gmesh
Example #3
def get_meshes(seed, galaxies=False):
    mesh = {}
    mesh['s'] = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) +
                                  'mesh/s/')
    partp = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) +
                              'dynamic/1/Position/')
    mesh['cic'] = tools.paintcic(partp, bs, nc)
    mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
    mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
    mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
    mesh['GD'] = mesh['R1'] - mesh['R2']

    hmesh = {}
    hposall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                'FOF/PeakPosition/')[1:]
    massall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                'FOF/Mass/')[1:].reshape(-1) * 1e10
    hposd = hposall[:num].copy()
    massd = massall[:num].copy()
    hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
    hmesh['pnn'] = tools.paintnn(hposd, bs, nc)
    hmesh['pnnsm'] = tools.fingauss(hmesh['pnn'], kk, R1, kny)
    hmesh['mnn'] = tools.paintnn(hposd, bs, nc, massd)

    return mesh, hmesh
Example #4
def get_meshes(seed, galaxies=False):
    mesh = {}
    mesh['s'] = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) +
                                  'mesh/s/')
    mesh['cic'] = np.load(path + ftypefpm % (bs, nc, seed, step) +
                          'mesh/d.npy')
    #    partp = tools.readbigfile(path + ftypefpm%(bs, nc, seed, step) + 'dynamic/1/Position/')
    #    mesh['cic'] = tools.paintcic(partp, bs, nc)
    #    mesh['logcic'] = np.log(1 + mesh['cic'])
    #    mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
    #    mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
    #    mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
    #    mesh['GD'] = mesh['R1'] - mesh['R2']
    #
    hmesh = {}
    hposall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                'FOF/PeakPosition/')[1:]
    massall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                'FOF/Mass/')[1:].reshape(-1) * 1e10
    hposd = hposall[:num].copy()
    massd = massall[:num].copy()
    hmesh['pnn'] = tools.paintnn(hposd, bs, nc)
    hmesh['mnn'] = tools.paintnn(hposd, bs, nc, massd)
    hmesh['mnnnomean'] = (hmesh['mnn']) / hmesh['mnn'].mean()
    #hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
    #hmesh['mcic'] = tools.paintcic(hposd, bs, nc, massd)
    #hmesh['mcicnomean'] =  (hmesh['mcic'])/hmesh['mcic'].mean()
    #hmesh['mcicovd'] =  (hmesh['mcic'] - hmesh['mcic'].mean())/hmesh['mcic'].mean()
    #hmesh['mcicovdR3'] = tools.fingauss(hmesh['mcicovd'], kk, R1, kny)
    #hmesh['pcicovd'] =  (hmesh['pcic'] - hmesh['pcic'].mean())/hmesh['pcic'].mean()
    #hmesh['pcicovdR3'] = tools.fingauss(hmesh['pcicovd'], kk, R1, kny)
    #hmesh['lmnn'] = np.log(logoffset + hmesh['mnn'])

    return mesh, hmesh
Example #5
def gethalomesh(bs,
                nc,
                seed,
                step=5,
                ncf=512,
                stepf=40,
                masswt=False,
                numd=1e-3,
                gridding='nn',
                path=None,
                getdata=False):

    if path is None: path = package_path + '/../../data/z00/'
    ftype = 'L%04d_N%04d_S%04d_%02dstep/'

    num = int(numd * bs**3)
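    # numd is a target halo number density, so num = int(numd * bs**3) selects the
    # most massive FOF halos (catalogs are assumed to be sorted by mass); the leading
    # catalog entry is skipped below, presumably a placeholder row.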
    hposall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                'FOF/PeakPosition/')[1:]
    massall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                'FOF/Mass/')[1:]
    hposd = hposall[:num]
    massd = massall[:num].reshape(-1) * 1e10
    if masswt: mass = massd
    else: mass = np.ones_like(massd)

    if gridding == 'nn': hmesh = tools.paintnn(hposd, bs, nc, mass=mass)
    else: hmesh = tools.paintcic(hposd, bs, nc, weights=mass)

    if getdata: return hmesh, hposd, massd
    else: return hmesh
Example #6
def generate_training_data():
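    # For every seed: paint matter CIC and Gaussian-smoothed feature meshes plus
    # galaxy NN target meshes, wrap-pad the features for periodicity, and cut random
    # training voxels of each requested cube size with dtools.randomvoxels.
    # seeds, path, ftype, bs, nc, ncp, ncf, step, stepf, kk, kny, R1, pad, ftname,
    # tgname, cube_sizes, cube_sizesft, max_offset and num_cubes are assumed globals.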
    meshes = {}
    cube_features = [[] for i in range(len(cube_sizes))]
    cube_target = [[] for i in range(len(cube_sizes))]

    for seed in seeds:
        mesh = {}
        partp = tools.readbigfile(path + ftype % (bs, nc, seed, step) +
                                  'dynamic/1/Position/')
        mesh['cic'] = tools.paintcic(partp, bs, ncp)
        #mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
        mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
        #mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
        #mesh['GD'] = mesh['R1'] - mesh['R2']

        hmesh = {}
        hpath = path + ftype % (bs, ncf, seed, stepf) + 'galaxies_n05/galcat/'
        hposd = tools.readbigfile(hpath + 'Position/')
        massd = tools.readbigfile(hpath + 'Mass/').reshape(-1) * 1e10
        galtype = tools.readbigfile(hpath +
                                    'gal_type/').reshape(-1).astype(bool)
        hmesh['pnn'] = tools.paintnn(hposd, bs, ncp)
        hmesh['mnn'] = tools.paintnn(hposd, bs, ncp, massd)
        hmesh['pnnsat'] = tools.paintnn(hposd[galtype], bs, ncp)
        hmesh['pnncen'] = tools.paintnn(hposd[~galtype], bs, ncp)
        meshes[seed] = [mesh, hmesh]

        print('All the meshes have been generated for seed = %d' % seed)

        #Create training voxels
        ftlist = [mesh[i].copy() for i in ftname]
        ftlistpad = [np.pad(i, pad, 'wrap') for i in ftlist]
        #     targetmesh = hmesh['pnn']
        targetmesh = [hmesh[i].copy() for i in tgname]

        for i, size in enumerate(cube_sizes):
            print('For size = ', size)
            if size == nc:
                features = [np.stack(ftlistpad, axis=-1)]
                target = [np.stack(targetmesh, axis=-1)]
            else:
                numcubes = int(num_cubes / size * 4)
                features, target = dtools.randomvoxels(ftlistpad,
                                                       targetmesh,
                                                       numcubes,
                                                       max_offset[i],
                                                       size,
                                                       cube_sizesft[i],
                                                       seed=seed,
                                                       rprob=0)
            cube_features[i] = cube_features[i] + features
            cube_target[i] = cube_target[i] + target

    # #
    for i in range(cube_sizes.size):
        cube_target[i] = np.stack(cube_target[i], axis=0)
        cube_features[i] = np.stack(cube_features[i], axis=0)
        print(cube_features[i].shape, cube_target[i].shape)

    return meshes, cube_features, cube_target
Example #7
def generate_training_data(seed, bs, nc):
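    # Variant that reads a precomputed FastPM density (.npy) plus FOF CMPosition and
    # Length catalogs for one seed, and appends full-box feature/target cubes to the
    # module-level cube_features / cube_target lists; num, pad, ftname and tgname are
    # assumed to be defined alongside this excerpt. Any failure is only printed.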

    j = np.where(cube_sizes == nc)[0][0]

    path = '../data/make_data_code/L%d-N%d-B1-T5/S%d/' % (bs, nc, seed)
    #path = '../data/L%d-N%d-B1-T5/S%d/'%(bs, nc, seed)

    try:

        mesh = {}
        #    mesh['s'] = np.load(path + 'fpm-s.npy')
        mesh['cic'] = np.load(path + 'fpm-d.npy')
        #    mesh['logcic'] = np.log(1 + mesh['cic'])
        #    mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
        #    mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
        #    mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
        #    mesh['GD'] = mesh['R1'] - mesh['R2']
        #
        #    hpath = '../data/L%d-N%d-B2-T10/S%d/fastpm_1.0000/LL-0.200/'%(bs, nc*4, seed)
        hpath = '../data/make_data_code/L%d-N%d-B2-T10/S%d/FOF/' % (bs, nc * 4,
                                                                    seed)
        hmesh = {}
        hposall = tools.readbigfile(hpath + 'CMPosition/')[1:]
        massall = tools.readbigfile(hpath + 'Length/')[1:].reshape(-1)
        hposd = hposall[:num[j]].copy()
        massd = massall[:num[j]].copy()
        #    hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
        #    hmesh['pnn'] = tools.paintnn(hposd, bs, nc)
        #    hmesh['mnn'] = tools.paintnn(hposd, bs, nc, massd)
        hmesh['mcic'] = tools.paintcic(hposd, bs, nc, massd)
        hmesh['mcicnomean'] = (hmesh['mcic']) / hmesh['mcic'].mean()
        ##    hmesh['mcicovd'] =  (hmesh['mcic'] - hmesh['mcic'].mean())/hmesh['mcic'].mean()
        ##    hmesh['mcicovdR3'] = tools.fingauss(hmesh['mcicovd'], kk, R1, kny)
        ##    hmesh['pcicovd'] =  (hmesh['pcic'] - hmesh['pcic'].mean())/hmesh['pcic'].mean()
        ##    hmesh['pcicovdR3'] = tools.fingauss(hmesh['pcicovd'], kk, R1, kny)
        ##    hmesh['lmnn'] = np.log(logoffset + hmesh['mnn'])
        ##
        ftlist = [mesh[i].copy() for i in ftname]
        ftlistpad = [np.pad(i, pad, 'wrap') for i in ftlist]
        targetmesh = [hmesh[i].copy() for i in tgname]
        features = [np.stack(ftlistpad, axis=-1)]
        target = [np.stack(targetmesh, axis=-1)]

        cube_features[j] = cube_features[j] + features
        cube_target[j] = cube_target[j] + target

    except Exception as e:
        print(e)
Example #8
def get_diff_spectra(args, ipklin,  nsims=10, nsteps=3):
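    # Estimate the large-scale bias of the halo data against the final matter field
    # and build an interpolator for the power deficit P_lin(k) - P(initdata / bias),
    # averaged over nsims realizations; returns (ipkdiff, bias).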

    bs, nc = args.bs, args.nc
    nsims = args.nsims
    numd = args.numd
    try:
        R = args.Rstd
    except AttributeError:
        R = 128
    ncf = args.ncf
    
    path = '//mnt/ceph/users/cmodi/cosmo4d/z00/'
    dpath = path + '/L%04d_N%04d_D%04d//'%(bs, nc, numd*1e4)
    alldata = np.array([np.load(dpath + 'S%04d.npy'%i) for i in range(100, 100+nsims)]).astype(np.float32)
    initdata = np.array([np.load(dpath + 'stdR%d_S%04d.npy'%(R,i)) for i in range(100, 100+nsims)]).astype(np.float32)
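    # The final matter meshes may live under either directory naming convention;
    # try the plain "%02dstep" folder first and fall back to the "%02dstep_B1" variant.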

    try:
        dyn = "%02dstep"%nsteps
        path = '//mnt/ceph/users/cmodi/cosmo4d/z00/'
        path = path + '/L%04d_N%04d_%s//'%(bs, nc, dyn)
        final = np.array([tools.readbigfile(path + '/L%04d_N%04d_S%04d_%02dstep/mesh/d/'%(bs, nc, seed, nsteps)) for seed in range(100, 100+nsims)]).astype(np.float32)
    except:
        dyn = "%02dstep_B1"%nsteps
        path = '//mnt/ceph/users/cmodi/cosmo4d/z00/'
        path = path + '/L%04d_N%04d_%s//'%(bs, nc, dyn)
        final = np.array([tools.readbigfile(path + '/L%04d_N%04d_S%04d_%02dstep/mesh/d/'%(bs, nc, seed, nsteps)) for seed in range(100, 100+nsims)]).astype(np.float32)
        
    print('alldata shape :', alldata.shape)
    pdiffs, bb = [], []
    for j in range(nsims):
        k, pfin = tools.power(final[j], boxsize=bs)
        ph = tools.power(1+alldata[j, 1], boxsize=bs)[1]
        bias = ((ph[1:5]/pfin[1:5])**0.5).mean()
        bb.append(bias)
        recon = initdata[j] / bias
        precon = tools.power(1 + recon, boxsize=bs)[1]
        pdiff = ipklin(k) - precon
        pdiffs.append(pdiff)

    pdiff = np.array(pdiffs).mean(axis=0)
    bias = np.array(bb).mean(axis=0)
    xx, yy = k[pdiff > 0], pdiff[pdiff > 0]
    ipkdiff = lambda x: 10**np.interp(np.log10(x), np.log10(xx), np.log10(yy))

    return ipkdiff, bias
Example #9
def get_meshes(seed, galaxies=False, inverse=True):
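    # Same construction as the earlier get_meshes examples; if the module-level flag
    # `stellar` is set, halo masses are replaced by the precomputed stellarmass.npy
    # catalog (compare the scattercatalog example further down).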
    mesh = {}
    mesh['s'] = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) +
                                  'mesh/s/')
    partp = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) +
                              'dynamic/1/Position/')
    mesh['cic'] = tools.paintcic(partp, bs, nc)
    mesh['ciclog'] = np.log(1e-3 + mesh['cic'])
    mesh['cicovd'] = mesh['cic'] / mesh['cic'].mean() - 1
    mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
    mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
    mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
    mesh['GD'] = mesh['R1'] - mesh['R2']

    hmesh = {}
    hposall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                'FOF/PeakPosition/')[1:]
    if stellar:
        massall = np.load(path + ftype % (bs, ncf, seed, stepf) +
                          'stellarmass.npy')
    else:
        massall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                    'FOF/Mass/')[1:].reshape(-1) * 1e10

    hposd = hposall[:num].copy()
    massd = massall[:num].copy()
    print(massall.min() / 1e10, massall.max() / 1e10)
    print(massd.min() / 1e10, massd.max() / 1e10)

    hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
    hmesh['pnn'] = tools.paintnn(hposd, bs, nc)
    hmesh['mnn'] = tools.paintnn(hposd, bs, nc, massd)
    hmesh['mcic'] = tools.paintcic(hposd, bs, nc, massd)
    hmesh['mcicnomean'] = (hmesh['mcic']) / hmesh['mcic'].mean()
    hmesh['mcicovd'] = (hmesh['mcic'] -
                        hmesh['mcic'].mean()) / hmesh['mcic'].mean()
    hmesh['pcicovd'] = (hmesh['pcic'] -
                        hmesh['pcic'].mean()) / hmesh['pcic'].mean()
    hmesh['pcicovdR3'] = tools.fingauss(hmesh['pcicovd'], kk, R1, kny)

    if inverse: return hmesh, mesh
    else: return mesh, hmesh
Example #10
def get_meshes(seed, pdict=defdict):
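    # NOTE: assigning into locals() inside a function has no effect in CPython, so
    # the loop below does not actually unpack pdict; the names it is meant to set
    # (path, bs, nc, ncp, ncf, step, stepf, num, ...) end up being read from
    # module-level globals instead.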
    for i in pdict.keys(): locals()[i] = pdict[i]

    mesh = {}
    mesh['s'] = tools.readbigfile(path + ftypefpm%(bs, nc, seed, step) + 'mesh/s/')
    partp = tools.readbigfile(path + ftypefpm%(bs, nc, seed, step) + 'dynamic/1/Position/')
    mesh['cic'] = tools.paintcic(partp, bs, ncp)
    #mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
    mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
    mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
    mesh['GD'] = mesh['R1'] - mesh['R2']

    hmesh = {}
    hpath = path + ftype%(bs, ncf, seed, stepf) + 'FOF/'
    hposd = tools.readbigfile(hpath + 'PeakPosition/')
    massd = tools.readbigfile(hpath + 'Mass/').reshape(-1)*1e10
    #galtype = tools.readbigfile(hpath + 'gal_type/').reshape(-1).astype(bool)
    hposall = tools.readbigfile(path + ftype%(bs, ncf, seed, stepf) + 'FOF/PeakPosition/')[1:]    
    massall = tools.readbigfile(path + ftype%(bs, ncf, seed, stepf) + 'FOF/Mass/')[1:].reshape(-1)*1e10
    hposd = hposall[:num].copy()
    massd = massall[:num].copy()
    #hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
    hmesh['pnn'] = tools.paintnn(hposd, bs, ncp)
    hmesh['mnn'] = tools.paintnn(hposd, bs, ncp, massd)
    #hmesh['pnnsat'] = tools.paintnn(hposd[galtype], bs, ncp)
    #hmesh['pnncen'] = tools.paintnn(hposd[~galtype], bs, ncp)

    return mesh, hmesh
Example #11
def get_meshes(seed, galaxies=False, inverse=True):
    mesh = {}
    mesh['s'] = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) +
                                  'mesh/s/')
    partp = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) +
                              'dynamic/1/Position/')
    mesh['cic'] = tools.paintcic(partp, bs, nc)
    mesh['ciclog'] = np.log(1e-4 + mesh['cic'])
    ##    mesh['cicovd'] = mesh['cic']/mesh['cic'].mean()-1
    ##    mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
    ##    mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
    ##    mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
    ##    mesh['GD'] = mesh['R1'] - mesh['R2']
    ##
    hmesh = {}
    hpath = path + ftype % (bs, ncf, seed, stepf) + 'galaxies_n05/galcat/'
    hposd = tools.readbigfile(hpath + 'Position/')
    massd = tools.readbigfile(hpath + 'Mass/').reshape(-1) * 1e10
    galtype = tools.readbigfile(hpath + 'gal_type/').reshape(-1).astype(bool)
    hmesh['pnn'] = tools.paintnn(hposd, bs, nc)
    hmesh['pnnovd'] = (hmesh['pnn'] -
                       hmesh['pnn'].mean()) / hmesh['pnn'].mean()
    hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
    hmesh['pcicovd'] = (hmesh['pcic'] -
                        hmesh['pcic'].mean()) / hmesh['pcic'].mean()
    hmesh['mnn'] = tools.paintnn(hposd, bs, nc, massd)
    hmesh['mnnovd'] = (hmesh['mnn'] -
                       hmesh['mnn'].mean()) / hmesh['mnn'].mean()
    hmesh['mcic'] = tools.paintcic(hposd, bs, nc, massd)
    hmesh['mcicovd'] = (hmesh['mcic'] -
                        hmesh['mcic'].mean()) / hmesh['mcic'].mean()
    ##    hmesh['mnn'] = tools.paintnn(hposd, bs, nc, massd)
    ##    hmesh['pnnsat'] = tools.paintnn(hposd[galtype], bs, nc)
    ##    hmesh['pnncen'] = tools.paintnn(hposd[~galtype], bs, nc)
    ##
    ##
    if inverse: return hmesh, mesh
    else: return mesh, hmesh
Example #12
def scattercatalog(seed, mmin=1e12):
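    # Assign a stellar mass to every FOF halo by sampling log-normal scatter around a
    # fitted stellar-to-halo-mass relation (fitstellar / fitscatter with parameters
    # from ../data/stellar.json), flag halos below mmin with -999, save the catalog,
    # and plot a FastPM vs. Illustris comparison.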
    hmass = tools.readbigfile(path + ftype%(bs, ncf, seed, stepf) + 'FOF/Mass/')[1:].reshape(-1)*1e10
    print(hmass.max()/1e12, hmass.min()/1e12)
    with open('../data/stellar.json', "r") as read_file:
        p = json.load(read_file)
    mbins = p['mbins']
    pm = p['stellarfit']
    ps = p['scatterfit']
    print(pm, ps)
    
    smassmean = fitstellar(pm, None, hmass, True)
    smasssig = fitscatter(ps, hmass, None, True)
    print(fitstellar(pm, None, 1e12, True))
    print(fitscatter(ps, 1e12, None, True))

    smasssig[smasssig < 0.1] = 0.1
    np.random.seed(seed)
    scatter = np.random.normal(scale=smasssig)
    smass = np.exp(np.log(smassmean) + scatter)
    mask = hmass >= mmin
    smass[~mask] = -999
    np.save(path + ftype%(bs, ncf, seed, stepf) + '/stellarmass', smass)

    fig, ax = plt.subplots(1, 2, figsize=(9, 4), sharex=True, sharey=True)
    axis = ax[0]
    axis.plot(hmass[mask], smass[mask], '.')
    axis.plot(hmass[mask], smassmean[mask], '.')
    axis.loglog()
    axis.grid()
    axis.set_title('FastPM')

    axis = ax[1]
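    # mh, ms: Illustris halo and stellar masses, assumed to be loaded at module level.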
    axis.plot(mh[mh>mmin], ms[mh>mmin], '.')
    axis.plot(hmass[mask], smassmean[mask], '.')
    axis.loglog()
    axis.grid()
    axis.set_title('Illustris')
    plt.savefig(path + ftype%(bs, ncf, seed, stepf) + '/stellarmass.png')
    plt.close()
Example #13
saver = tf.train.import_meta_graph('./../code/models/gal%02d/%s/%s.meta'%(numd*1e4, suff, chkname))
saver.restore(sess,'./../code/models/gal%02d/%s/%s'%(numd*1e4, suff, chkname))
g = sess.graph
prediction = g.get_tensor_by_name('prediction:0')
input = g.get_tensor_by_name('input:0')
keepprob = g.get_tensor_by_name('keepprob:0')
rate = g.get_tensor_by_name('rate:0')
pdf = tfd.Poisson(rate=rate)
samplesat = pdf.sample()
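# The restored graph exposes the 'prediction', 'input', 'keepprob' and 'rate' tensors;
# samplesat draws integer satellite counts from a Poisson with the predicted rate.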

#############################
meshes = {}
cube_features, cube_target = [], []
for seed in tseeds:
    mesh = {}
    partp = tools.readbigfile(path + ftype%(bs, nc, seed, step) + 'dynamic/1/Position/')
    mesh['cic'] = tools.paintcic(partp, bs, ncp)
    #mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
    mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
    #mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
    #mesh['GD'] = mesh['R1'] - mesh['R2']

    hmesh = {}
    hpath = path + ftype%(bs, ncf, seed, stepf) + 'galaxies_n05/galcat/'
    hposd = tools.readbigfile(hpath + 'Position/')
    massd = tools.readbigfile(hpath + 'Mass/').reshape(-1)*1e10
    galtype = tools.readbigfile(hpath + 'gal_type/').reshape(-1).astype(bool)
    #hposall = tools.readbigfile(path + ftype%(bs, ncf, seed, stepf) + 'FOF/PeakPosition/')[1:]    
    #hposd = hposall[:num].copy()
    #massd = massall[:num].copy()
    #hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
Example #14
def main(_):
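    # Reconstruction driver: read the 5-step FastPM run (ICs, final field, particle
    # positions) and the 40-step FOF catalog, build an RSD-shifted, mass-weighted halo
    # mesh as data, then anneal a tf.estimator reconstruction over the mass threshold
    # M0 (mms), likelihood width w (wws) and smoothing scale R0 (RRs).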

    infield = True
    dtype = tf.float32
    mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
    nc, bs = FLAGS.nc, FLAGS.box_size
    a0, a, nsteps = FLAGS.a0, FLAGS.af, FLAGS.nsteps
    stages = np.linspace(a0, a, nsteps, endpoint=True)
    numd = 1e-3

    ##Begin here
    klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)

    #pypath = '/global/cscratch1/sd/chmodi/cosmo4d/output/version2/L0400_N0128_05step-fof/lhd_S0100/n10/opt_s999_iM12-sm3v25off/meshes/'
    final = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0128_S0100_05step/mesh/d/'
    )
    ic = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0128_S0100_05step/mesh/s/'
    )
    fpos = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0128_S0100_05step/dynamic/1/Position/'
    )
    aa = 1
    zz = 1 / aa - 1
    rsdfactor = float(100 / (aa**2 * cosmo.H(zz).value**1))
    print('\nRsdfactor used is : ', rsdfactor)

    hpos = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0512_S0100_40step/FOF/PeakPosition//'
    )[1:int(bs**3 * numd)]
    hvel = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0512_S0100_40step/FOF/CMVelocity//'
    )[1:int(bs**3 * numd)]
    rsdpos = hpos + hvel * rsdfactor * np.array([0, 0, 1])
    print('Effective displacement : ', (hvel[:, -1] * rsdfactor).std())
    hmass = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0512_S0100_40step/FOF/Mass//'
    )[1:int(bs**3 * numd)].flatten()

    meshpos = tools.paintcic(rsdpos, bs, nc)
    meshmass = tools.paintcic(rsdpos, bs, nc, hmass.flatten() * 1e10)
    data = meshmass
    kv = tools.fftk([nc, nc, nc], bs, symmetric=True, dtype=np.float32)
    datasm = tools.fingauss(data, kv, 3, np.pi * nc / bs)
    ic, data = np.expand_dims(ic, 0), np.expand_dims(data,
                                                     0).astype(np.float32)
    datasm = np.expand_dims(datasm, 0).astype(np.float32)
    print("Min in data : %0.4e" % datasm.min())

    #

    ####################################################

    stdinit = srecon.standardinit(bs, nc, meshpos, hpos, final, R=8)
    recon_estimator = tf.estimator.Estimator(model_fn=model_fn,
                                             model_dir=fpath)

    def predict_input_fn(data=data,
                         M0=0.,
                         w=3.,
                         R0=0.,
                         off=None,
                         istd=None,
                         x0=None):
        features = {}
        features['datasm'] = data
        features['rsdfactor'] = rsdfactor
        features['M0'] = M0
        features['w'] = w
        features['R0'] = R0
        features['off'] = off
        features['istd'] = istd
        features['x0'] = x0
        return features, None

    eval_results = recon_estimator.predict(
        input_fn=lambda: predict_input_fn(x0=ic), yield_single_examples=False)

    for i, pred in enumerate(eval_results):
        if i > 0: break

    suff = '-model'
    dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data],
                 fpath + '/figs/')
    dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data],
                  fpath + '/figs/', bs)
    np.save(fpath + '/reconmeshes/ic_true' + suff, pred['ic'])
    np.save(fpath + '/reconmeshes/fin_true' + suff, pred['final'])
    np.save(fpath + '/reconmeshes/model_true' + suff, pred['model'])

    #
    randominit = np.random.normal(size=data.size).reshape(data.shape)
    #eval_results = recon_estimator.predict(input_fn=lambda : predict_input_fn(x0 = np.expand_dims(stdinit, 0)), yield_single_examples=False)
    eval_results = recon_estimator.predict(
        input_fn=lambda: predict_input_fn(x0=randominit),
        yield_single_examples=False)

    for i, pred in enumerate(eval_results):
        if i > 0: break

    suff = '-init'
    dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data],
                 fpath + '/figs/')
    dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data],
                  fpath + '/figs/', bs)
    np.save(fpath + '/reconmeshes/ic_init' + suff, pred['ic'])
    np.save(fpath + '/reconmeshes/fin_init' + suff, pred['final'])
    np.save(fpath + '/reconmeshes/model_init' + suff, pred['model'])

    #
    # Train and evaluate model.
    mms = [1e12, 1e11]
    wws = [1., 2., 3.]
    RRs = [4., 2., 1., 0.5, 0.]
    niter = 100
    iiter = 0

    for mm in mms:

        noisefile = '/project/projectdirs/m3058/chmodi/cosmo4d/train/L0400_N0128_05step-n10/width_3/Wts_30_10_1/r1rf1/hlim-13_nreg-43_batch-5/eluWts-10_5_1/blim-20_nreg-23_batch-100/hist_M%d_na.txt' % (
            np.log10(mm) * 10)
        offset, ivar = setnoise(datasm, noisefile, noisevar=0.25)
        istd = ivar**0.5
        if not FLAGS.offset: offset = None
        if not FLAGS.istd: istd = None

        for R0 in RRs:

            for ww in wws:

                print('\nFor iteration %d\n' % iiter)
                print('With mm=%0.2e, R0=%0.2f, ww=%d \n' % (mm, R0, ww))

                def train_input_fn():
                    features = {}
                    features['datasm'] = datasm
                    features['rsdfactor'] = rsdfactor
                    features['M0'] = mm
                    features['w'] = ww
                    features['R0'] = R0
                    features['off'] = offset
                    features['istd'] = istd
                    features['x0'] = np.expand_dims(
                        stdinit, 0
                    )  #np.random.normal(size=datasm.size).reshape(datasm.shape)
                    features['lr'] = 0.01
                    return features, None

                recon_estimator.train(input_fn=train_input_fn,
                                      max_steps=iiter + niter)
                eval_results = recon_estimator.predict(
                    input_fn=predict_input_fn, yield_single_examples=False)

                for i, pred in enumerate(eval_results):
                    if i > 0: break

                iiter += niter  #
                suff = '-%d-M%d-R%d-w%d' % (iiter, np.log10(mm), R0, ww)
                dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data],
                             fpath + '/figs/')
                dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data],
                              fpath + '/figs/', bs)
                suff = '-M%d-R%d-w%d' % (np.log10(mm), R0, ww)
                np.save(fpath + '/reconmeshes/ic' + suff, pred['ic'])
                np.save(fpath + '/reconmeshes/fin' + suff, pred['final'])
                np.save(fpath + '/reconmeshes/model' + suff, pred['model'])

        RRs = [1., 0.5, 0.]
        wws = [3.]
        niter = 200

    sys.exit(0)

    ##
    exit(0)
Example #15
def main(_):
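    # Distributed variant: resolve a SLURM cluster, build a Mesh-TensorFlow placement
    # mesh, lower the recon_prototype graph, and anneal the update ops over the mass
    # threshold, smoothing scale and width, optionally resuming from a previously
    # saved ic_recon.npy (readin with mm0, RR0, ww0).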

    infield = True
    dtype = tf.float32
    mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
    nc, bs = FLAGS.nc, FLAGS.box_size
    a0, a, nsteps = FLAGS.a0, FLAGS.af, FLAGS.nsteps
    stages = np.linspace(a0, a, nsteps, endpoint=True)
    numd = 1e-3

    startw = time.time()

    print(mesh_shape)

    #layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
    #mesh_shape = [("row", FLAGS.nx), ("col", FLAGS.ny)]
    layout_rules = [("nx_lr", "row"), ("ny_lr", "col"), ("nx", "row"),
                    ("ny", "col"), ("ty", "row"), ("tz", "col"),
                    ("ty_lr", "row"), ("tz_lr", "col"), ("nx_block", "row"),
                    ("ny_block", "col")]

    # Resolve the cluster from SLURM environment
    cluster = tf.distribute.cluster_resolver.SlurmClusterResolver(
        {"mesh": mesh_shape.size // FLAGS.gpus_per_task},
        port_base=8822,
        gpus_per_node=FLAGS.gpus_per_node,
        gpus_per_task=FLAGS.gpus_per_task,
        tasks_per_node=FLAGS.tasks_per_node)
    cluster_spec = cluster.cluster_spec()
    print(cluster_spec)
    # Create a server for all mesh members
    server = tf.distribute.Server(cluster_spec, "mesh", cluster.task_id)
    print(server)

    if cluster.task_id > 0:
        server.join()

    # Otherwise we are the main task, let's define the devices
    devices = [
        "/job:mesh/task:%d/device:GPU:%d" % (i, j)
        for i in range(cluster_spec.num_tasks("mesh"))
        for j in range(FLAGS.gpus_per_task)
    ]
    print("List of devices", devices)

    mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
        mesh_shape, layout_rules, devices)

    ##Begin here
    klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)

    final = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0128_S0100_05step/mesh/d/'
    )
    ic = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0128_S0100_05step/mesh/s/'
    )

    pypath = '/global/cscratch1/sd/chmodi/cosmo4d/output/version2/L0400_N0128_05step-fof/lhd_S0100/n10/opt_s999_iM12-sm3v25off/meshes/'
    fin = tools.readbigfile(pypath + 'decic//')

    hpos = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0512_S0100_40step/FOF/PeakPosition//'
    )[1:int(bs**3 * numd)]
    hmass = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0512_S0100_40step/FOF/Mass//'
    )[1:int(bs**3 * numd)].flatten()

    #meshpos = tools.paintcic(hpos, bs, nc)
    meshmass = tools.paintcic(hpos, bs, nc, hmass.flatten() * 1e10)
    data = meshmass
    kv = tools.fftk([nc, nc, nc], bs, symmetric=True, dtype=np.float32)
    datasm = tools.fingauss(data, kv, 3, np.pi * nc / bs)
    ic, data = np.expand_dims(ic, 0), np.expand_dims(data,
                                                     0).astype(np.float32)
    datasm = np.expand_dims(datasm, 0).astype(np.float32)
    print("Min in data : %0.4e" % datasm.min())

    np.save(fpath + 'ic', ic)
    np.save(fpath + 'data', data)

    ####################################################
    tf.reset_default_graph()
    print('ic constructed')

    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")

    initial_conditions, data_field, loss, var_grads, update_op, linear_op, input_field, lr, R0, M0, width, chisq, prior, tf_off, tf_istd = recon_prototype(
        mesh, datasm, nc=FLAGS.nc, batch_size=FLAGS.batch_size, dtype=dtype)

    # Lower mesh computation

    start = time.time()
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    restore_hook = mtf.MtfRestoreHook(lowering)
    end = time.time()
    print('\n Time for lowering : %f \n' % (end - start))

    tf_initc = lowering.export_to_tf_tensor(initial_conditions)
    tf_data = lowering.export_to_tf_tensor(data_field)
    tf_chisq = lowering.export_to_tf_tensor(chisq)
    tf_prior = lowering.export_to_tf_tensor(prior)
    tf_grads = lowering.export_to_tf_tensor(var_grads[0])
    #tf_lr = lowering.export_to_tf_tensor(lr)
    tf_linear_op = lowering.lowered_operation(linear_op)
    tf_update_ops = lowering.lowered_operation(update_op)
    n_block_x, n_block_y, n_block_z = FLAGS.nx, FLAGS.ny, 1
    nc = FLAGS.nc

    with tf.Session(server.target) as sess:

        start = time.time()
        sess.run(tf_linear_op, feed_dict={input_field: ic})
        ic_check, data_check = sess.run([tf_initc, tf_data], {width: 3})

        dg.saveimfig('-check', [ic_check, data_check], [ic, data],
                     fpath + '/figs/')
        dg.save2ptfig('-check', [ic_check, data_check], [ic, data],
                      fpath + '/figs/', bs)
        print('Total time taken for mesh thingy is : ', time.time() - start)

        sess.run(tf_linear_op,
                 feed_dict={
                     input_field:
                     np.random.normal(size=ic.size).reshape(ic.shape)
                 })
        ic0, data0 = sess.run([tf_initc, tf_data], {width: 3})
        dg.saveimfig('-init', [ic0, data0], [ic, data], fpath)
        start = time.time()

        titer = 20
        niter = 101
        iiter = 0

        start0 = time.time()
        RRs = [4, 2, 1, 0.5, 0]
        wws = [1, 2, 3]
        lrs = np.array([0.1, 0.1, 0.1, 0.1, 0.1]) * 2
        #lrs = [0.1, 0.05, 0.01, 0.005, 0.001]

        readin = True
        mm0, ww0, RR0 = 1e12, 3, 0.5
        if readin:
            icread = np.load(fpath + '/figs-M%02d-R%02d-w%01d/ic_recon.npy' %
                             (np.log10(mm0), 10 * RR0, ww0))
            sess.run(tf_linear_op, feed_dict={input_field: icread})

        for mm in [1e12, 1e11]:
            print('Fraction of points above 1 for mm = %0.2e: ' % mm,
                  (datasm > mm).sum() / datasm.size)
            noisefile = '/project/projectdirs/m3058/chmodi/cosmo4d/train/L0400_N0128_05step-n10/width_3/Wts_30_10_1/r1rf1/hlim-13_nreg-43_batch-5/eluWts-10_5_1/blim-20_nreg-23_batch-100/hist_M%d_na.txt' % (
                np.log10(mm) * 10)
            offset, ivar = setnoise(datasm, noisefile, noisevar=0.25)
            for iR, zlR in enumerate(zip(RRs, lrs)):
                RR, lR = zlR
                for ww in wws:
                    for ff in [
                            fpath + '/figs-M%02d-R%02d-w%01d' %
                        (np.log10(mm), 10 * RR, ww)
                    ]:
                        try:
                            os.makedirs(ff)
                        except Exception as e:
                            print(e)
                    if readin:
                        if mm > mm0: continue
                        elif mm == mm0 and RR > RR0:
                            print(RR, RR0, RRs)
                            continue
                        elif RR == RR0 and ww <= ww0:
                            print(ww, ww0, wws)
                            continue
                        else:
                            print('Starting from %0.2e' % mm, RR, ww)
                    print('Do for %0.2e' % mm, RR, ww)

                    for i in range(niters[iR]):
                        iiter += 1
                        sess.run(
                            tf_update_ops, {
                                lr: lR,
                                M0: mm,
                                R0: RR,
                                width: ww,
                                tf_off: offset,
                                tf_istd: ivar**0.5
                            })
                        if (i % titer == 0):
                            end = time.time()
                            print('Iter : ', i)
                            print('Time taken for %d iterations: ' % titer,
                                  end - start)
                            start = end

                            ##
                            ic1, data1, cc, pp = sess.run(
                                [tf_initc, tf_data, tf_chisq, tf_prior], {
                                    M0: mm,
                                    R0: RR,
                                    width: ww,
                                    tf_off: offset,
                                    tf_istd: ivar**0.5
                                })
                            print('Chisq and prior are : ', cc, pp)

                            dg.saveimfig(i, [ic1, data1], [ic, data], ff)
                            dg.save2ptfig(i, [ic1, data1], [ic, data], ff, bs)

                    ic1, data1 = sess.run([tf_initc, tf_data], {width: ww})
                    np.save(ff + '/ic_recon', ic1)
                    np.save(ff + '/data_recon', data1)
                    dg.saveimfig(iiter, [ic1, data1], [ic, data],
                                 fpath + '/figs')
                    dg.save2ptfig(iiter, [ic1, data1], [ic, data],
                                  fpath + '/figs', bs)

            wws = [3]
            RRs = [0]
            niters = [201, 101, 201]
            lrs = np.array([0.1, 0.1, 0.1])

        ic1, data1 = sess.run([tf_initc, tf_data], {width: 3})
        print('Total time taken for %d iterations is : ' % iiter,
              time.time() - start0)

    dg.saveimfig('', [ic1, data1], [ic, data], fpath)
    dg.save2ptfig('', [ic1, data1], [ic, data], fpath, bs)

    np.save(fpath + 'ic_recon', ic1)
    np.save(fpath + 'data_recon', data1)
    print('Total wallclock time is : ', time.time() - start0)

    ##
    exit(0)
Example #16
    #output folder
    suffix = 'nc0norm/'
    ofolder = './saved/L%04d_N%04d_S%04d_galmodel/' % (bs, nc, seed)
    if anneal: ofolder += 'anneal%d/' % len(R0s)
    else: ofolder += '/noanneal/'
    ofolder = ofolder + suffix
    try:
        os.makedirs(ofolder)
    except:
        pass
    print('Output in ofolder = \n%s' % ofolder)
    pkfile = '../flowpm/Planck15_a1p00.txt'
    config = Config(bs=bs, nc=nc, seed=seed, pkfile=pkfile)

    #Generate Data
    truth = tools.readbigfile(dpath + ftype % (bs, nc, seed, step) + 'mesh/s/')
    final = tools.readbigfile(dpath + ftype % (bs, nc, seed, step) + 'mesh/d/')
    #
    hpath = dpath + ftype % (bs, ncf, seed, stepf) + 'galaxies_n05/galcat/'
    hposd = tools.readbigfile(hpath + 'Position/')
    massd = tools.readbigfile(hpath + 'Mass/').reshape(-1) * 1e10
    galtype = tools.readbigfile(hpath + 'gal_type/').reshape(-1).astype(bool)
    allgal = tools.paintnn(hposd, bs, nc)
    satmesh = tools.paintnn(hposd[galtype], bs, nc)
    cenmesh = tools.paintnn(hposd[~galtype], bs, nc)
    data = np.stack((cenmesh, satmesh), axis=-1)

    np.save(ofolder + '/truth.f4', truth)
    np.save(ofolder + '/data.f4', data)

    ###
Example #17
def main(_):
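    # Bias-model reconstruction: fit a bias model to the evolved field with getbias,
    # measure the error power spectrum of data minus the bias model, and anneal a
    # tf.estimator reconstruction over the smoothing scale R0 only.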

    infield = True
    dtype = tf.float32
    mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
    nc, bs = FLAGS.nc, FLAGS.box_size
    a0, a, nsteps = FLAGS.a0, FLAGS.af, FLAGS.nsteps
    stages = np.linspace(a0, a, nsteps, endpoint=True)
    numd = 1e-3

    ##Begin here
    klin = np.loadtxt('../data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)

    #pypath = '/global/cscratch1/sd/chmodi/cosmo4d/output/version2/L0400_N0128_05step-fof/lhd_S0100/n10/opt_s999_iM12-sm3v25off/meshes/'
    final = tools.readbigfile('../data//L0400_N0128_S0100_05step/mesh/d/')
    ic = tools.readbigfile('../data/L0400_N0128_S0100_05step/mesh/s/')
    fpos = tools.readbigfile(
        '../data/L0400_N0128_S0100_05step/dynamic/1/Position/')

    hpos = tools.readbigfile(
        '../data/L0400_N0512_S0100_40step/FOF/PeakPosition//')[1:int(bs**3 *
                                                                     numd)]
    hmass = tools.readbigfile(
        '../data/L0400_N0512_S0100_40step/FOF/Mass//')[1:int(bs**3 *
                                                             numd)].flatten()

    meshpos = tools.paintcic(hpos, bs, nc)
    meshmass = tools.paintcic(hpos, bs, nc, hmass.flatten() * 1e10)
    data = meshmass
    data /= data.mean()
    data -= 1
    kv = tools.fftk([nc, nc, nc], bs, symmetric=True, dtype=np.float32)
    datasm = tools.fingauss(data, kv, 3, np.pi * nc / bs)
    ic, data = np.expand_dims(ic, 0), np.expand_dims(data,
                                                     0).astype(np.float32)
    datasm = np.expand_dims(datasm, 0).astype(np.float32)
    print("Min in data : %0.4e" % datasm.min())

    np.save(fpath + 'ic', ic)
    np.save(fpath + 'data', data)

    ####################################################
    #
    tf.reset_default_graph()
    tfic = tf.constant(ic.astype(np.float32))
    state = lpt_init(tfic, a0=0.1, order=1)
    final_state = nbody(state, stages, FLAGS.nc)
    tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])
    with tf.Session() as sess:
        state = sess.run(final_state)

    fpos = state[0, 0] * bs / nc
    bparams, bmodel = getbias(bs, nc, data[0] + 1, ic[0], fpos)
    #bmodel += 1 #np.expand_dims(bmodel, 0) + 1
    errormesh = data - np.expand_dims(bmodel, 0)
    kerror, perror = tools.power(errormesh[0] + 1, boxsize=bs)
    kerror, perror = kerror[1:], perror[1:]
    print("Error power spectra", kerror, perror)
    print("\nkerror", kerror.min(), kerror.max(), "\n")
    print("\nperror", perror.min(), perror.max(), "\n")
    suff = "-error"
    dg.saveimfig(suff, [ic, errormesh], [ic, data], fpath + '/figs/')
    dg.save2ptfig(suff, [ic, errormesh], [ic, data], fpath + '/figs/', bs)
    ipkerror = iuspline(kerror, perror)

    ####################################################

    #stdinit = srecon.standardinit(bs, nc, meshpos, hpos, final, R=8)

    recon_estimator = tf.estimator.Estimator(model_fn=model_fn,
                                             model_dir=fpath)

    def predict_input_fn(data=data,
                         M0=0.,
                         w=3.,
                         R0=0.,
                         off=None,
                         istd=None,
                         x0=None):
        features = {}
        features['datasm'] = data
        features['R0'] = R0
        features['x0'] = x0
        features['bparams'] = bparams
        features['ipkerror'] = [kerror, perror]  #ipkerror
        return features, None

    eval_results = recon_estimator.predict(
        input_fn=lambda: predict_input_fn(x0=ic), yield_single_examples=False)

    for i, pred in enumerate(eval_results):
        if i > 0: break

    suff = '-model'
    dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data],
                 fpath + '/figs/')
    dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data],
                  fpath + '/figs/', bs)
    np.save(fpath + '/reconmeshes/ic_true' + suff, pred['ic'])
    np.save(fpath + '/reconmeshes/fin_true' + suff, pred['final'])
    np.save(fpath + '/reconmeshes/model_true' + suff, pred['model'])

    #
    randominit = np.random.normal(size=data.size).reshape(data.shape)
    #eval_results = recon_estimator.predict(input_fn=lambda : predict_input_fn(x0 = np.expand_dims(stdinit, 0)), yield_single_examples=False)
    eval_results = recon_estimator.predict(
        input_fn=lambda: predict_input_fn(x0=randominit),
        yield_single_examples=False)

    for i, pred in enumerate(eval_results):
        if i > 0: break

    suff = '-init'
    dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data],
                 fpath + '/figs/')
    dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data],
                  fpath + '/figs/', bs)
    np.save(fpath + '/reconmeshes/ic_init' + suff, pred['ic'])
    np.save(fpath + '/reconmeshes/fin_init' + suff, pred['final'])
    np.save(fpath + '/reconmeshes/model_init' + suff, pred['model'])

    #
    # Train and evaluate model.
    RRs = [4., 2., 1., 0.5, 0.]
    niter = 100
    iiter = 0

    for R0 in RRs:

        print('\nFor iteration %d\n' % iiter)
        print('With  R0=%0.2f \n' % (R0))

        def train_input_fn():
            features = {}
            features['datasm'] = data
            features['R0'] = R0
            features['bparams'] = bparams
            features['ipkerror'] = [kerror, perror]  #ipkerror
            #features['x0'] = np.expand_dims(stdinit, 0)
            features['x0'] = randominit
            features['lr'] = 0.01
            return features, None

        recon_estimator.train(input_fn=train_input_fn, max_steps=iiter + niter)
        eval_results = recon_estimator.predict(input_fn=predict_input_fn,
                                               yield_single_examples=False)

        for i, pred in enumerate(eval_results):
            if i > 0: break

        iiter += niter  #
        suff = '-%d-R%d' % (iiter, R0)
        dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data],
                     fpath + '/figs/')
        dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data],
                      fpath + '/figs/', bs)
        np.save(fpath + '/reconmeshes/ic' + suff, pred['ic'])
        np.save(fpath + '/reconmeshes/fin' + suff, pred['final'])
        np.save(fpath + '/reconmeshes/model' + suff, pred['model'])

    sys.exit(0)

    ##
    exit(0)
Example #18
def main():
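    # Standard-reconstruction check for a single seed: build number- and mass-weighted
    # halo meshes, estimate the linear bias from a power-spectrum ratio, run
    # standardrecon for a range of smoothing scales R, and plot the cross-correlation
    # of the reconstructed field with the true initial conditions.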
    #bs, nc = 400, 64
    #ncf, stepf = nc*4, 40
    numd = 1e-3
    num = int(numd*bs**3)
    seed = 100     


    path = '//mnt/ceph/users/cmodi/cosmo4d/z00/'
    dyn = "%02dstep_B1"%nsteps
    dynf = "%02dstep_B1"%nstepsf
    hpath = path + '/L%04d_N%04d_%s//'%(bs, ncf, dynf)
    path = path + '/L%04d_N%04d_%s//'%(bs, nc, dyn)

    ic = tools.readbigfile(path + '/L%04d_N%04d_S%04d_%02dstep/mesh/s/'%(bs, nc, seed, nsteps))
    final = tools.readbigfile(path + '/L%04d_N%04d_S%04d_%02dstep/mesh/d/'%(bs, nc, seed, nsteps))
    
    hpos = tools.readbigfile(hpath + '/L%04d_N%04d_S%04d_%02dstep/FOF/PeakPosition/'%(bs, ncf, seed, nstepsf))[:num]
    hmassall = tools.readbigfile(hpath + '/L%04d_N%04d_S%04d_%02dstep/FOF/Mass/'%(bs, ncf, seed, nstepsf)).flatten()
    print(hmassall.shape, hmassall.shape[0]/bs**3, hmassall.shape[0]/bs**3 /numd)
    hmass = hmassall[:num]
    print(hmass.shape, hmass.shape[0]/bs**3, hmass.shape[0]/bs**3 /numd)
    hmeshpos = tools.paintcic(hpos, bs, nc)
    hmeshmass = tools.paintcic(hpos, bs, nc, hmass.flatten()*1e10)
    hmeshmass /= hmeshmass.mean()
    hmeshmass -= 1
    hmeshpos /= hmeshpos.mean()
    hmeshpos -= 1

    if posdata: data = tf.constant(hmeshpos.reshape(1, nc, nc, nc), dtype=tf.float32)
    else: data = tf.constant(hmeshmass.reshape(1, nc, nc, nc), dtype=tf.float32)
    
    base = hmeshpos
    #base = (base - base.mean())/base.mean()
    pfin = tools.power(final, boxsize=bs)[1]
    ph = tools.power(1+base, boxsize=bs)[1]
    bias = ((ph[1:5]/pfin[1:5])**0.5).mean()

    tfdisplaced, tfrandom = standardrecon(data, tf.expand_dims(tf.constant(hpos, dtype=tf.float32), 0),
                                          tf.constant(bias, dtype=tf.float32), R=tf.constant(8, dtype=tf.float32))
    
    displaced, random = tfdisplaced.numpy()[0], tfrandom.numpy()[0]

    displaced /= displaced.mean()
    displaced -= 1
    random /= random.mean()
    random -= 1
    recon = np.squeeze(displaced - random)
    print(recon.mean())
    print(displaced.shape, random.shape)

    import matplotlib.pyplot as plt
    plt.figure(figsize = (9, 4))
    plt.subplot(131)
    plt.imshow(ic.sum(axis=0))
    plt.subplot(132)
    plt.imshow(data.numpy()[0].sum(axis=0))
    plt.subplot(133)
    plt.imshow(recon.sum(axis=0))
    plt.savefig('tmp.png')
    plt.close()

    print(ic.mean(),  recon.mean())
    k, p1 = tools.power(ic+1, boxsize=bs)
    p2 = tools.power(recon+1, boxsize=bs)[1]
    px = tools.power(ic+1, f2=recon+1, boxsize=bs)[1]
    plt.plot(k, p2/p1)
    plt.plot(k, px/(p1*p2)**0.5, '--')
    plt.semilogx()
    plt.savefig('tmp2.png')
    plt.close()


    for R in [4, 8, 16, 24, 32, 64, 128, 200, 256]:
        tfdisplaced, tfrandom = standardrecon(data, tf.expand_dims(tf.constant(hpos, dtype=tf.float32), 0),
                                          tf.constant(bias, dtype=tf.float32), R=tf.constant(R, dtype=tf.float32))
    
        displaced, random = tfdisplaced.numpy()[0], tfrandom.numpy()[0]

        displaced /= displaced.mean()
        displaced -= 1
        random /= random.mean()
        random -= 1
        recon = np.squeeze(displaced - random)

        print(ic.mean(),  recon.mean())
        k, p1 = tools.power(ic+1, boxsize=bs)
        p2 = tools.power(recon+1, boxsize=bs)[1]
        px = tools.power(ic+1, f2=recon+1, boxsize=bs)[1]
        #plt.plot(k, p2/p1)
        plt.plot(k, px/(p1*p2)**0.5, '-', label=R)
    plt.semilogx()
    plt.legend()
    plt.semilogx()
    plt.grid(which='both')
    plt.ylim(-0.2, 1.2)
    plt.savefig('stdRcompare.png')
Example #19
    ofolder = ofolder + suffix
    try:
        os.makedirs(ofolder)
    except:
        pass
    print('Output in ofolder = \n%s' % ofolder)
    pkfile = '../flowpm/Planck15_a1p00.txt'
    config = Config(bs=bs, nc=nc, seed=seed, pkfile=pkfile)
    #hgraph = dg.graphlintomod(config, modpath, pad=pad, ny=1)
    print('Diagnostic graph constructed')
    fname = open(ofolder + '/README', 'w', 1)
    fname.write('Using module from path - %s \n' % modpath)
    fname.close()

    #Generate Data
    truth = tools.readbigfile(dpath + ftype % (bs, nc, seed, step) + 'mesh/s/')
    print(truth.shape)
    final = tools.readbigfile(dpath + ftype % (bs, nc, seed, step) + 'mesh/d/')
    print(final.shape)
    hposall = tools.readbigfile(dpath + ftype % (bs, ncf, seed, stepf) +
                                'FOF/PeakPosition/')[1:]
    hposd = hposall[:num].copy()
    data = tools.paintnn(hposd, bs, nc)

    truemeshes = [truth, final, data]
    np.save(ofolder + '/truth.f4', truth)
    np.save(ofolder + '/final.f4', final)
    np.save(ofolder + '/data.f4', data)

    ###
    #Do reconstruction here
Example #20
    recon = displaced - random
    return recon


if __name__ == "__main__":

    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
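    # Smoke test: paint the halo mesh for one seed, run the standard-reconstruction
    # initializer against the final matter field, and save a comparison figure.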
    bs, nc, step = 400, 128, 5
    ncf, stepf = 512, 40
    seed = 100
    config = tfpmconfig.Config(bs=bs, nc=nc, seed=seed)
    #
    path = '../../data/z00/'
    ftype = 'L%04d_N%04d_S%04d_%02dstep/'
    base, pos, mass = dtools.gethalomesh(bs, nc, seed, getdata=True)
    meshinit = tools.readbigfile(path + ftype % (bs, nc, seed, step) +
                                 'mesh/s/')
    meshfin = tools.readbigfile(path + ftype % (bs, nc, seed, step) +
                                'mesh/d/')

    recon = standardinit(config, base, pos, meshfin, R=8)

    import matplotlib.pyplot as plt
    fig, ax = plt.subplots(2, 2, figsize=(9, 9))
    ax[0, 0].imshow(meshinit.sum(axis=0))
    ax[0, 1].imshow(meshfin.sum(axis=0))
    ax[1, 0].imshow(base.sum(axis=0))
    ax[1, 1].imshow(recon.sum(axis=0))
    plt.savefig('./figs/standard.png')

#    mesh['cic'] = tools.paintcic(partp, bs, nc)
#    mesh['s'] =
Example #21
    #output folder
    ofolder = './saved/L%04d_N%04d_S%04d_n%02d/' % (bs, nc, seed, numd * 1e4)
    if anneal: ofolder += 'anneal%d/' % len(R0s)
    else: ofolder += '/noanneal/'
    ofolder = ofolder + suffix
    try:
        os.makedirs(ofolder)
    except:
        pass
    print('Output in ofolder = \n%s' % ofolder)
    pkfile = '../flowpm/Planck15_a1p00.txt'
    config = Config(bs=bs, nc=nc, seed=seed, pkfile=pkfile)

    #Generate Data
    truth = tools.readbigfile(dpath + ftypefpm % (bs, nc, seed, step) +
                              'mesh/s/')
    print(truth.shape)
    final = tools.readbigfile(dpath + ftypefpm % (bs, nc, seed, step) +
                              'mesh/d/')
    print(final.shape)
    data = final / final.mean() - 1

    #truemeshes = [truth, final, data]
    truemeshes = [truth, final, data]

    np.save(ofolder + '/truth.f4', truth)
    np.save(ofolder + '/final.f4', final)
    np.save(ofolder + '/data.f4', data)

    ###
    #Do reconstruction here
Example #22
##            #np.save(path + '/fpm-s', np.squeeze(ic))
##            np.save(path + '/fpm-d', np.squeeze(sim))
##        else:
##            print(path + '/fpm-d' + ' exists')
##    else:
##        print(path + ' does not exist')
##        
##



for ss in range(100, 1000, 100):

    path = '../data/z00/L%04d_N%04d_S%04d_%dstep/'%(bs, nc,  ss, nsteps)

    ic = np.expand_dims(tools.readbigfile(path + '/mesh/s/').astype(np.float32), axis=0)
    print(ic.shape)

    initial_conditions = tf.cast(tf.constant(ic), tf.float32) 
        
    print(initial_conditions)

    # Sample particles
    state = flowpm.lpt_init(initial_conditions, a0=ainit)   

    # Evolve particles down to z=0
    final_state = flowpm.nbody(state, stages, nc)         

    # Retrieve final density field
    final_field = flowpm.cic_paint(tf.zeros_like(initial_conditions), final_state[0])
Example #23
        os.makedirs(ofolder)
    except:
        pass
    print('Output in ofolder = \n%s' % ofolder)
    pkfile = '../flowpm/Planck15_a1p00.txt'
    config = Config(bs=bs, nc=nc, seed=seed, pkfile=pkfile)
    kmesh = sum(kk**2 for kk in config['kvec'])**0.5
    #hgraph = dg.graphlintomod(config, modpath, pad=pad, ny=1)
    print('Diagnostic graph constructed')
    fname = open(ofolder + '/README', 'w', 1)
    fname.write('Using module from path - %s \n' % modpath)
    fname.close()
    print('\nUsing module from path - %s \n' % modpath)

    #Generate Data
    truth = tools.readbigfile(dpath + ftypefpm % (bs, nc, seed, step) +
                              'mesh/s/')
    print(truth.shape)
    final = tools.readbigfile(dpath + ftype % (bs, nc, seed, step) + 'mesh/d/')
    print(final.shape)
    hposall = tools.readbigfile(dpath + ftype % (bs, ncf, seed, stepf) +
                                'FOF/PeakPosition/')[1:]
    massall = tools.readbigfile(dpath + ftype % (bs, ncf, seed, stepf) +
                                'FOF/Mass/')[1:].reshape(-1) * 1e10
    massd = massall[:num].copy()
    hposd = hposall[:num].copy()
    #
    if datacic:
        datam = tools.paintcic(hposd, bs, nc, massd)
        datap = tools.paintcic(hposd, bs, nc)
    else:
        datam = tools.paintnn(hposd, bs, nc, massd)
Example #24
    fig.suptitle(title)
    fig.tight_layout(rect=[0, 0, 1, 0.95])
    fig.savefig(fname)


#######################################################
if __name__ == "__main__":
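    # Regenerate halo-likelihood diagnostic figures for saved reconstruction iterates
    # (iter%d.f4.npy) using savehalofig and the module loaded by graphlintomod.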

    bs, nc = 400, 128
    seed = 100
    step = 5

    config = Config(bs=bs, nc=nc, seed=seed)
    modpath = '/home/chmodi/Projects/galmodel/code/models/n10/pad2-logistic/module/1546529135/likelihood'

    truelin = tools.readbigfile(dpath + ftype % (bs, nc, seed, step) +
                                'mesh/s/').astype(np.float32)
    truefin = tools.readbigfile(dpath + ftype % (bs, nc, seed, step) +
                                'mesh/d/').astype(np.float32)
    truedata = dtools.gethalomesh(bs, nc, seed).astype(np.float32)

    g = graphlintomod(config, modpath, pad=2, ny=1)

    reconpath = './saved/L0400_N0128_S0100_n10/noanneal/nc3norm_std/'
    for i in range(0, 100, 25):
        print(i)
        mesh = np.load(reconpath + 'iter%d.f4.npy' % i).reshape(nc, nc, nc)
        savehalofig([truelin, truefin, truedata],
                    mesh,
                    fname=reconpath + 'iter%d.png' % i,
                    hgraph=g,
                    boxsize=bs)
Example #25
##            print(path + '/fpm-d' + ' exists')
##    else:
##        print(path + ' does not exist')
##
##

for ss in range(1900, 2000, 100):
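    # Stack the initial conditions of ten seeds into one batch and evolve them to
    # z=0 with FlowPM; bs, nc, nsteps, ainit and stages are assumed module-level.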

    print(ss)
    seeds = np.arange(ss, ss + 100, 10)

    ic = []
    for iseed, seed in enumerate(seeds):
        path = '../data/make_data_code/L%d-N%d-B1-T%d/S%d/' % (bs, nc, nsteps,
                                                               seed)
        ic.append(tools.readbigfile(path + '/mesh/s/'))

    ic = np.stack(ic)
    print(ic.shape)
    print(ic.mean())

    initial_conditions = tf.cast(tf.constant(ic), tf.float32)

    print(initial_conditions)

    # Sample particles
    state = flowpm.lpt_init(initial_conditions, a0=ainit)

    # Evolve particles down to z=0
    final_state = flowpm.nbody(state, stages, nc)
Example #26
sess = tf.Session()
chkname = suff #+'_it%d'%niter

module = hub.Module('./../code/models/n%02d/%s/%s.hub'%(numd*1e4, suff, chkname))
xx = tf.placeholder(tf.float32, shape=[None, cube_sizeft, cube_sizeft, cube_sizeft, nchannels], name='input')
yy = tf.placeholder(tf.float32, shape=[None, cube_size, cube_size, cube_size, 1], name='labels')
output = module(dict(input=xx, label=yy, keepprob=1), as_dict=True)['prediction']
sess = tf.Session()
sess.run(tf.initializers.global_variables())
#
#############################
meshes = {}
cube_features, cube_target = [], []
for seed in seeds:
    mesh = {}
    partp = tools.readbigfile(path + ftype%(bs, nc, seed, step) + 'dynamic/1/Position/')
    mesh['cic'] = tools.paintcic(partp, bs, ncp)
    mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
    mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
    mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
    mesh['GD'] = mesh['R1'] - mesh['R2']
    mesh['s'] = tools.readbigfile(path + ftype%(bs, nc, seed, step) + 'mesh/s/')

    hmesh = {}
    hposall = tools.readbigfile(path + ftype%(bs, ncf, seed, stepf) + 'FOF/PeakPosition/')[1:]
    hposd = hposall[:num].copy()
    hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
    hmesh['pnn'] = tools.paintnn(hposd, bs, ncp)
    hmesh['target'] = hmesh['pnn'].copy()
    
    print('All the meshes have been generated for seed = %d' % seed)
def all_sim():
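    # Loop over seeds 100-600: build position- and mass-weighted halo meshes, estimate
    # the linear bias from a power-spectrum ratio, run standardrecon with smoothing
    # scale R (assumed to be set at module level), and save the result as
    # stdR{R}_S{seed}.npy; diagnostic figures are written for seed 100 only.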

    path = '//mnt/ceph/users/cmodi/cosmo4d/z00/'
    dyn = "%02dstep_B1"%nsteps
    dynf = "%02dstep_B1"%nstepsf
    hpath = path + '/L%04d_N%04d_%s//'%(bs, ncf, dynf)
    path = path + '/L%04d_N%04d_%s//'%(bs, nc, dyn)


    for seed in range(100, 601):
        print(seed)
        ic = tools.readbigfile(path + '/L%04d_N%04d_S%04d_%02dstep/mesh/s/'%(bs, nc, seed, nsteps))
        final = tools.readbigfile(path + '/L%04d_N%04d_S%04d_%02dstep/mesh/d/'%(bs, nc, seed, nsteps))
        hpos = tools.readbigfile(hpath + '/L%04d_N%04d_S%04d_%02dstep/FOF/PeakPosition/'%(bs, ncf, seed, nstepsf))[:num]
        hmassall = tools.readbigfile(hpath + '/L%04d_N%04d_S%04d_%02dstep/FOF/Mass/'%(bs, ncf, seed, nstepsf)).flatten()
        hmass = hmassall[:num]
        hmeshpos = tools.paintcic(hpos, bs, nc)
        hmeshmass = tools.paintcic(hpos, bs, nc, hmass.flatten()*1e10)
        hmeshmass /= hmeshmass.mean()
        hmeshmass -= 1
        hmeshpos /= hmeshpos.mean()
        hmeshpos -= 1
        
        if posdata: 
            data = tf.constant(hmeshpos.reshape(1, nc, nc, nc), dtype=tf.float32)
        else: data = tf.constant(hmeshmass.reshape(1, nc, nc, nc), dtype=tf.float32)
    
        base = hmeshpos
        pfin = tools.power(final, boxsize=bs)[1]
        ph = tools.power(1+base, boxsize=bs)[1]
        bias = ((ph[1:5]/pfin[1:5])**0.5).mean()
        
        tfdisplaced, tfrandom = standardrecon(data, tf.expand_dims(tf.constant(hpos, dtype=tf.float32), 0),
                                              tf.constant(bias, dtype=tf.float32), R=tf.constant(R, dtype=tf.float32))
        
        displaced, random = tfdisplaced.numpy()[0], tfrandom.numpy()[0]
        
        displaced /= displaced.mean()
        displaced -= 1
        random /= random.mean()
        random -= 1
        recon = np.squeeze(displaced - random)
        savepath =  '//mnt/ceph/users/cmodi/cosmo4d/z00/L%04d_N%04d_D%04d//'%(bs, nc, numd*1e4)
        np.save(savepath + 'stdR%d_S%04d'%(R, seed), recon)


        if seed == 100:
            import matplotlib.pyplot as plt
            plt.figure(figsize = (9, 4))
            plt.subplot(131)
            plt.imshow(ic.sum(axis=0))
            plt.subplot(132)
            plt.imshow(data.numpy()[0].sum(axis=0))
            plt.subplot(133)
            plt.imshow(recon.sum(axis=0))
            plt.savefig('tmp.png')
            plt.close()

            print(ic.mean(),  recon.mean())
            k, p1 = tools.power(ic+1, boxsize=bs)
            p2 = tools.power(recon+1, boxsize=bs)[1]
            px = tools.power(ic+1, f2=recon+1, boxsize=bs)[1]
            plt.plot(k, p2/p1)
            plt.plot(k, px/(p1*p2)**0.5, '--')
            plt.semilogx()
            plt.savefig('tmp2.png')
            plt.close()