Code example #1
File: halo_logistic.py  Project: modichirag/galmodel
def get_meshes(seed, galaxies=False):
    mesh = {}
    mesh['s'] = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) +
                                  'mesh/s/')
    partp = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) +
                              'dynamic/1/Position/')
    mesh['cic'] = tools.paintcic(partp, bs, nc)
    mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
    mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
    mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
    mesh['GD'] = mesh['R1'] - mesh['R2']

    hmesh = {}
    hposall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                'FOF/PeakPosition/')[1:]
    massall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                'FOF/Mass/')[1:].reshape(-1) * 1e10
    hposd = hposall[:num].copy()
    massd = massall[:num].copy()
    hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
    hmesh['pnn'] = tools.paintnn(hposd, bs, nc)
    hmesh['pnnsm'] = tools.fingauss(hmesh['pnn'], kk, R1, kny)
    hmesh['mnn'] = tools.paintnn(hposd, bs, nc, massd)

    return mesh, hmesh
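
The R1, R2, and GD entries above are two Gaussian-smoothed versions of the CIC density field and their difference. As a reference, here is a minimal plain-numpy sketch of Fourier-space Gaussian smoothing in the spirit of tools.fingauss; it is a hypothetical stand-in that ignores the finite-grid (Nyquist) corrections the kk and kny arguments suggest, not the project's implementation.

import numpy as np

def gauss_smooth(field, bs, R):
    # Smooth a periodic 3D field in a box of side bs with a Gaussian of
    # radius R by multiplying its FFT with exp(-0.5 k^2 R^2).
    nc = field.shape[0]
    k1d = 2 * np.pi * np.fft.fftfreq(nc, d=bs / nc)   # wavenumbers along one axis
    kx, ky, kz = np.meshgrid(k1d, k1d, k1d, indexing='ij')
    kernel = np.exp(-0.5 * (kx**2 + ky**2 + kz**2) * R**2)
    return np.fft.ifftn(np.fft.fftn(field) * kernel).real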
Code example #2
def get_meshes(seed, galaxies=False):
    mesh = {}
    mesh['s'] = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) +
                                  'mesh/s/')
    partp = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) +
                              'dynamic/1/Position/')
    mesh['cic'] = tools.paintcic(partp, bs, nc)
    mesh['cicovd'] = mesh['cic'] / mesh['cic'].mean() - 1
    mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
    mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
    mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
    mesh['GD'] = mesh['R1'] - mesh['R2']

    hmesh = {}
    hposall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                'FOF/PeakPosition/')[1:]
    massall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                'FOF/Mass/')[1:].reshape(-1) * 1e10
    hposd = hposall[:num].copy()
    massd = massall[:num].copy()
    print(massd[-1] / 1e10)
    hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
    hmesh['pnn'] = tools.paintnn(hposd, bs, nc)
    hmesh['mnn'] = tools.paintnn(hposd, bs, nc, massd)
    hmesh['mcic'] = tools.paintcic(hposd, bs, nc, massd)
    hmesh['mcicovd'] = (hmesh['mcic'] -
                        hmesh['mcic'].mean()) / hmesh['mcic'].mean()
    data = hmesh['mcicovd']
    print(data.min(), data.max(), data.mean(), data.std())

    return mesh, hmesh
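
This variant additionally builds mass-weighted CIC meshes ('mcic') and their overdensity ('mcicovd'). For reference, a plain-numpy sketch of cloud-in-cell painting in the spirit of tools.paintcic; a hypothetical stand-in that assumes positions are given in box units, not the project's implementation.

import numpy as np

def paint_cic(pos, bs, nc, weight=None):
    # Deposit each particle's weight onto the 8 surrounding grid cells,
    # with periodic wrapping (cloud-in-cell assignment).
    mesh = np.zeros((nc, nc, nc))
    w0 = np.ones(len(pos)) if weight is None else np.asarray(weight, dtype=float)
    g = pos / bs * nc                       # positions in grid units
    i0 = np.floor(g).astype(int)
    f = g - i0                              # fractional offsets within a cell
    for dx in (0, 1):
        for dy in (0, 1):
            for dz in (0, 1):
                wx = f[:, 0] if dx else 1 - f[:, 0]
                wy = f[:, 1] if dy else 1 - f[:, 1]
                wz = f[:, 2] if dz else 1 - f[:, 2]
                idx = (i0 + [dx, dy, dz]) % nc
                np.add.at(mesh, (idx[:, 0], idx[:, 1], idx[:, 2]), w0 * wx * wy * wz)
    return mesh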
Code example #3
File: traintools.py  Project: modichirag/galmodel
def get_meshes(seed, pdict=defdict):
    for i in pdict.keys(): locals()[i] = pdict[i]  # note: assigning into locals() does not create local variables in Python 3

    mesh = {}
    mesh['s'] = tools.readbigfile(path + ftypefpm%(bs, nc, seed, step) + 'mesh/s/')
    partp = tools.readbigfile(path + ftypefpm%(bs, nc, seed, step) + 'dynamic/1/Position/')
    mesh['cic'] = tools.paintcic(partp, bs, ncp)
    #mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
    mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
    mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
    mesh['GD'] = mesh['R1'] - mesh['R2']

    hmesh = {}
    hpath = path + ftype%(bs, ncf, seed, stepf) + 'FOF/'
    hposd = tools.readbigfile(hpath + 'PeakPosition/')
    massd = tools.readbigfile(hpath + 'Mass/').reshape(-1)*1e10  # note: hposd and massd are overwritten below by the [:num] slices
    #galtype = tools.readbigfile(hpath + 'gal_type/').reshape(-1).astype(bool)
    hposall = tools.readbigfile(path + ftype%(bs, ncf, seed, stepf) + 'FOF/PeakPosition/')[1:]    
    massall = tools.readbigfile(path + ftype%(bs, ncf, seed, stepf) + 'FOF/Mass/')[1:].reshape(-1)*1e10
    hposd = hposall[:num].copy()
    massd = massall[:num].copy()
    #hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
    hmesh['pnn'] = tools.paintnn(hposd, bs, ncp)
    hmesh['mnn'] = tools.paintnn(hposd, bs, ncp, massd)
    #hmesh['pnnsat'] = tools.paintnn(hposd[galtype], bs, ncp)
    #hmesh['pnncen'] = tools.paintnn(hposd[~galtype], bs, ncp)

    return mesh, hmesh
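
As noted in the comment above, writing into locals() inside a function does not actually create local variables in Python 3, so that loop only works if names such as bs, nc, and R1 also exist at module scope. A more explicit alternative might look like the sketch below (the dictionary keys are assumptions chosen for illustration):

def get_meshes(seed, pdict=defdict):
    # Pull parameters out of the dict by name instead of writing into locals().
    bs, nc, ncp, ncf = pdict['bs'], pdict['nc'], pdict['ncp'], pdict['ncf']
    step, stepf, num = pdict['step'], pdict['stepf'], pdict['num']
    kk, kny, R1, R2 = pdict['kk'], pdict['kny'], pdict['R1'], pdict['R2']
    # ... rest of the function unchanged ...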
Code example #4
File: gal_logistic.py  Project: modichirag/galmodel
def generate_training_data():
    meshes = {}
    cube_features, cube_target = [[] for i in range(len(cube_sizes))
                                  ], [[] for i in range(len(cube_sizes))]

    for seed in seeds:
        mesh = {}
        partp = tools.readbigfile(path + ftype % (bs, nc, seed, step) +
                                  'dynamic/1/Position/')
        mesh['cic'] = tools.paintcic(partp, bs, ncp)
        #mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
        mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
        #mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
        #mesh['GD'] = mesh['R1'] - mesh['R2']

        hmesh = {}
        hpath = path + ftype % (bs, ncf, seed, stepf) + 'galaxies_n05/galcat/'
        hposd = tools.readbigfile(hpath + 'Position/')
        massd = tools.readbigfile(hpath + 'Mass/').reshape(-1) * 1e10
        galtype = tools.readbigfile(hpath +
                                    'gal_type/').reshape(-1).astype(bool)
        hmesh['pnn'] = tools.paintnn(hposd, bs, ncp)
        hmesh['mnn'] = tools.paintnn(hposd, bs, ncp, massd)
        hmesh['pnnsat'] = tools.paintnn(hposd[galtype], bs, ncp)
        hmesh['pnncen'] = tools.paintnn(hposd[~galtype], bs, ncp)
        meshes[seed] = [mesh, hmesh]

        print('All the mesh have been generated for seed = %d' % seed)

        #Create training voxels
        ftlist = [mesh[i].copy() for i in ftname]
        ftlistpad = [np.pad(i, pad, 'wrap') for i in ftlist]
        #     targetmesh = hmesh['pnn']
        targetmesh = [hmesh[i].copy() for i in tgname]

        for i, size in enumerate(cube_sizes):
            print('For size = ', size)
            if size == nc:
                features = [np.stack(ftlistpad, axis=-1)]
                target = [np.stack(targetmesh, axis=-1)]
            else:
                numcubes = int(num_cubes / size * 4)
                features, target = dtools.randomvoxels(ftlistpad,
                                                       targetmesh,
                                                       numcubes,
                                                       max_offset[i],
                                                       size,
                                                       cube_sizesft[i],
                                                       seed=seed,
                                                       rprob=0)
            cube_features[i] = cube_features[i] + features
            cube_target[i] = cube_target[i] + target

    # #
    for i in range(cube_sizes.size):
        cube_target[i] = np.stack(cube_target[i], axis=0)
        cube_features[i] = np.stack(cube_features[i], axis=0)
        print(cube_features[i].shape, cube_target[i].shape)

    return meshes, cube_features, cube_target
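
The heavy lifting here is done by dtools.randomvoxels, which cuts matching random sub-cubes out of the padded feature meshes and the target meshes. A simplified, hypothetical stand-in is sketched below; it ignores the rotation/reflection augmentation implied by rprob and makes assumptions about the offset convention.

import numpy as np

def random_voxels(ftlistpad, targetmesh, numcubes, max_offset, size, sizeft, seed=0):
    # Draw numcubes random offsets; features come from the padded meshes
    # (cube side sizeft >= size), targets from the unpadded meshes (side size).
    rng = np.random.RandomState(seed)
    features, targets = [], []
    for _ in range(numcubes):
        x, y, z = rng.randint(0, max_offset + 1, size=3)
        features.append(np.stack(
            [m[x:x + sizeft, y:y + sizeft, z:z + sizeft] for m in ftlistpad], axis=-1))
        targets.append(np.stack(
            [m[x:x + size, y:y + size, z:z + size] for m in targetmesh], axis=-1))
    return features, targets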
Code example #5
def get_meshes(seed, galaxies=False, inverse=True):
    mesh = {}
    mesh['s'] = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) +
                                  'mesh/s/')
    partp = tools.readbigfile(path + ftypefpm % (bs, nc, seed, step) +
                              'dynamic/1/Position/')
    mesh['cic'] = tools.paintcic(partp, bs, nc)
    mesh['ciclog'] = np.log(1e-3 + mesh['cic'])
    mesh['cicovd'] = mesh['cic'] / mesh['cic'].mean() - 1
    mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
    mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
    mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
    mesh['GD'] = mesh['R1'] - mesh['R2']

    hmesh = {}
    hposall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                'FOF/PeakPosition/')[1:]
    if stellar:
        massall = np.load(path + ftype % (bs, ncf, seed, stepf) +
                          'stellarmass.npy')
    else:
        massall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                                    'FOF/Mass/')[1:].reshape(-1) * 1e10

    hposd = hposall[:num].copy()
    massd = massall[:num].copy()
    print(massall.min() / 1e10, massall.max() / 1e10)
    print(massd.min() / 1e10, massd.max() / 1e10)

    hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
    hmesh['pnn'] = tools.paintnn(hposd, bs, nc)
    hmesh['mnn'] = tools.paintnn(hposd, bs, nc, massd)
    hmesh['mcic'] = tools.paintcic(hposd, bs, nc, massd)
    hmesh['mcicnomean'] = (hmesh['mcic']) / hmesh['mcic'].mean()
    hmesh['mcicovd'] = (hmesh['mcic'] -
                        hmesh['mcic'].mean()) / hmesh['mcic'].mean()
    hmesh['pcicovd'] = (hmesh['pcic'] -
                        hmesh['pcic'].mean()) / hmesh['pcic'].mean()
    hmesh['pcicovdR3'] = tools.fingauss(hmesh['pcicovd'], kk, R1, kny)

    if inverse: return hmesh, mesh
    else: return mesh, hmesh
Code example #6
module = hub.Module('./../code/models/n%02d/%s/%s.hub'%(numd*1e4, suff, chkname))
xx = tf.placeholder(tf.float32, shape=[None, cube_sizeft, cube_sizeft, cube_sizeft, nchannels], name='input')
yy = tf.placeholder(tf.float32, shape=[None, cube_size, cube_size, cube_size, 1], name='labels')
output = module(dict(input=xx, label=yy, keepprob=1), as_dict=True)['prediction']
sess = tf.Session()
sess.run(tf.initializers.global_variables())
#
#############################
meshes = {}
cube_features, cube_target = [], []
for seed in seeds:
    mesh = {}
    partp = tools.readbigfile(path + ftype%(bs, nc, seed, step) + 'dynamic/1/Position/')
    mesh['cic'] = tools.paintcic(partp, bs, ncp)
    mesh['decic'] = tools.decic(mesh['cic'], kk, kny)
    mesh['R1'] = tools.fingauss(mesh['cic'], kk, R1, kny)
    mesh['R2'] = tools.fingauss(mesh['cic'], kk, R2, kny)
    mesh['GD'] = mesh['R1'] - mesh['R2']
    mesh['s'] = tools.readbigfile(path + ftype%(bs, nc, seed, step) + 'mesh/s/')

    hmesh = {}
    hposall = tools.readbigfile(path + ftype%(bs, ncf, seed, stepf) + 'FOF/PeakPosition/')[1:]
    hposd = hposall[:num].copy()
    hmesh['pcic'] = tools.paintcic(hposd, bs, nc)
    hmesh['pnn'] = tools.paintnn(hposd, bs, ncp)
    hmesh['target'] = hmesh['pnn'].copy()
    
    print('All the mesh have been generated for seed = %d'%seed)

    #Create training voxels
    ftlist = [mesh[i].copy() for i in ftname]
Code example #7
def main(_):

    infield = True
    dtype = tf.float32
    mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
    nc, bs = FLAGS.nc, FLAGS.box_size
    a0, a, nsteps = FLAGS.a0, FLAGS.af, FLAGS.nsteps
    stages = np.linspace(a0, a, nsteps, endpoint=True)
    numd = 1e-3

    ##Begin here
    klin = np.loadtxt('../data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)

    #pypath = '/global/cscratch1/sd/chmodi/cosmo4d/output/version2/L0400_N0128_05step-fof/lhd_S0100/n10/opt_s999_iM12-sm3v25off/meshes/'
    final = tools.readbigfile('../data//L0400_N0128_S0100_05step/mesh/d/')
    ic = tools.readbigfile('../data/L0400_N0128_S0100_05step/mesh/s/')
    fpos = tools.readbigfile(
        '../data/L0400_N0128_S0100_05step/dynamic/1/Position/')

    hpos = tools.readbigfile(
        '../data/L0400_N0512_S0100_40step/FOF/PeakPosition//')[1:int(bs**3 *
                                                                     numd)]
    hmass = tools.readbigfile(
        '../data/L0400_N0512_S0100_40step/FOF/Mass//')[1:int(bs**3 *
                                                             numd)].flatten()

    meshpos = tools.paintcic(hpos, bs, nc)
    meshmass = tools.paintcic(hpos, bs, nc, hmass.flatten() * 1e10)
    data = meshmass
    data /= data.mean()
    data -= 1
    kv = tools.fftk([nc, nc, nc], bs, symmetric=True, dtype=np.float32)
    datasm = tools.fingauss(data, kv, 3, np.pi * nc / bs)
    ic, data = np.expand_dims(ic, 0), np.expand_dims(data,
                                                     0).astype(np.float32)
    datasm = np.expand_dims(datasm, 0).astype(np.float32)
    print("Min in data : %0.4e" % datasm.min())

    np.save(fpath + 'ic', ic)
    np.save(fpath + 'data', data)

    ####################################################
    #
    tf.reset_default_graph()
    tfic = tf.constant(ic.astype(np.float32))
    state = lpt_init(tfic, a0=0.1, order=1)
    final_state = nbody(state, stages, FLAGS.nc)
    tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])
    with tf.Session() as sess:
        state = sess.run(final_state)

    fpos = state[0, 0] * bs / nc
    bparams, bmodel = getbias(bs, nc, data[0] + 1, ic[0], fpos)
    #bmodel += 1 #np.expand_dims(bmodel, 0) + 1
    errormesh = data - np.expand_dims(bmodel, 0)
    kerror, perror = tools.power(errormesh[0] + 1, boxsize=bs)
    kerror, perror = kerror[1:], perror[1:]
    print("Error power spectra", kerror, perror)
    print("\nkerror", kerror.min(), kerror.max(), "\n")
    print("\nperror", perror.min(), perror.max(), "\n")
    suff = "-error"
    dg.saveimfig(suff, [ic, errormesh], [ic, data], fpath + '/figs/')
    dg.save2ptfig(suff, [ic, errormesh], [ic, data], fpath + '/figs/', bs)
    ipkerror = iuspline(kerror, perror)

    ####################################################

    #stdinit = srecon.standardinit(bs, nc, meshpos, hpos, final, R=8)

    recon_estimator = tf.estimator.Estimator(model_fn=model_fn,
                                             model_dir=fpath)

    def predict_input_fn(data=data,
                         M0=0.,
                         w=3.,
                         R0=0.,
                         off=None,
                         istd=None,
                         x0=None):
        features = {}
        features['datasm'] = data
        features['R0'] = R0
        features['x0'] = x0
        features['bparams'] = bparams
        features['ipkerror'] = [kerror, perror]  #ipkerror
        return features, None

    eval_results = recon_estimator.predict(
        input_fn=lambda: predict_input_fn(x0=ic), yield_single_examples=False)

    for i, pred in enumerate(eval_results):
        if i > 0: break

    suff = '-model'
    dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data],
                 fpath + '/figs/')
    dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data],
                  fpath + '/figs/', bs)
    np.save(fpath + '/reconmeshes/ic_true' + suff, pred['ic'])
    np.save(fpath + '/reconmeshes/fin_true' + suff, pred['final'])
    np.save(fpath + '/reconmeshes/model_true' + suff, pred['model'])

    #
    randominit = np.random.normal(size=data.size).reshape(data.shape)
    #eval_results = recon_estimator.predict(input_fn=lambda : predict_input_fn(x0 = np.expand_dims(stdinit, 0)), yield_single_examples=False)
    eval_results = recon_estimator.predict(
        input_fn=lambda: predict_input_fn(x0=randominit),
        yield_single_examples=False)

    for i, pred in enumerate(eval_results):
        if i > 0: break

    suff = '-init'
    dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data],
                 fpath + '/figs/')
    dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data],
                  fpath + '/figs/', bs)
    np.save(fpath + '/reconmeshes/ic_init' + suff, pred['ic'])
    np.save(fpath + '/reconmeshes/fin_init' + suff, pred['final'])
    np.save(fpath + '/reconmeshes/model_init' + suff, pred['model'])

    #
    # Train and evaluate model.
    RRs = [4., 2., 1., 0.5, 0.]
    niter = 100
    iiter = 0

    for R0 in RRs:

        print('\nFor iteration %d\n' % iiter)
        print('With  R0=%0.2f \n' % (R0))

        def train_input_fn():
            features = {}
            features['datasm'] = data
            features['R0'] = R0
            features['bparams'] = bparams
            features['ipkerror'] = [kerror, perror]  #ipkerror
            #features['x0'] = np.expand_dims(stdinit, 0)
            features['x0'] = randominit
            features['lr'] = 0.01
            return features, None

        recon_estimator.train(input_fn=train_input_fn, max_steps=iiter + niter)
        eval_results = recon_estimator.predict(input_fn=predict_input_fn,
                                               yield_single_examples=False)

        for i, pred in enumerate(eval_results):
            if i > 0: break

        iiter += niter  #
        suff = '-%d-R%d' % (iiter, R0)
        dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data],
                     fpath + '/figs/')
        dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data],
                      fpath + '/figs/', bs)
        np.save(fpath + '/reconmeshes/ic' + suff, pred['ic'])
        np.save(fpath + '/reconmeshes/fin' + suff, pred['final'])
        np.save(fpath + '/reconmeshes/model' + suff, pred['model'])

    sys.exit(0)

    ##
    exit(0)
Code example #8
File: trainsimple.py  Project: modichirag/galmodel
num = int(numd * bs**3)
seed = 100
R1 = 3
R2 = 3 * 1.2
kny = np.pi * ncp / bs
kk = tools.fftk((ncp, ncp, ncp), bs)

suff = '2ftmdg256'
#############################
##Read data and generate meshes
#mesh = tools.readbigfile(path + ftype%(bs, nc, seed, step) + 'mesh/d/')
partp = tools.readbigfile(path + ftype % (bs, nc, seed, step) +
                          'dynamic/1/Position/')
mesh = tools.paintcic(partp, bs, ncp)
meshdecic = tools.decic(mesh, kk, kny)
meshR1 = tools.fingauss(mesh, kk, R1, kny)
meshR2 = tools.fingauss(mesh, kk, R2, kny)
meshdg = meshR1 - meshR2
#ftlist = [meshdecic.copy(), meshR1.copy(), meshdg.copy()]

hposall = tools.readbigfile(path + ftype % (bs, ncf, seed, stepf) +
                            'FOF/PeakPosition/')[1:]
hposd = hposall[:num].copy()
#hposall = hposall[:2*num]

# hpmeshall = tools.paintcic(hposall, bs, nc)
# hpmeshd = tools.paintcic(hposd, bs, nc)

#hpmeshall = tools.paintnn(hposall, bs, ncp)
hpmeshd = tools.paintnn(hposd, bs, ncp)
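
For comparison with the CIC scheme, a plain-numpy sketch of nearest-grid-point painting in the spirit of tools.paintnn; again a hypothetical stand-in that assumes positions are in box units and that an optional per-particle weight (e.g. mass) can be supplied.

import numpy as np

def paint_nn(pos, bs, nc, weight=None):
    # Each particle deposits its full weight into the single grid cell that
    # contains it, with periodic wrapping.
    w = np.ones(len(pos)) if weight is None else np.asarray(weight, dtype=float)
    idx = np.floor(pos / bs * nc).astype(int) % nc
    mesh = np.zeros((nc, nc, nc))
    np.add.at(mesh, (idx[:, 0], idx[:, 1], idx[:, 2]), w)
    return mesh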
Code example #9
File: fnnrecon.py  Project: modichirag/galference
def main(_):

    infield = True
    dtype = tf.float32
    mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
    nc, bs = FLAGS.nc, FLAGS.box_size
    a0, a, nsteps = FLAGS.a0, FLAGS.af, FLAGS.nsteps
    stages = np.linspace(a0, a, nsteps, endpoint=True)
    numd = 1e-3

    startw = time.time()

    print(mesh_shape)

    #layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
    #mesh_shape = [("row", FLAGS.nx), ("col", FLAGS.ny)]
    layout_rules = [("nx_lr", "row"), ("ny_lr", "col"), ("nx", "row"),
                    ("ny", "col"), ("ty", "row"), ("tz", "col"),
                    ("ty_lr", "row"), ("tz_lr", "col"), ("nx_block", "row"),
                    ("ny_block", "col")]

    # Resolve the cluster from SLURM environment
    cluster = tf.distribute.cluster_resolver.SlurmClusterResolver(
        {"mesh": mesh_shape.size // FLAGS.gpus_per_task},
        port_base=8822,
        gpus_per_node=FLAGS.gpus_per_node,
        gpus_per_task=FLAGS.gpus_per_task,
        tasks_per_node=FLAGS.tasks_per_node)
    cluster_spec = cluster.cluster_spec()
    print(cluster_spec)
    # Create a server for all mesh members
    server = tf.distribute.Server(cluster_spec, "mesh", cluster.task_id)
    print(server)

    if cluster.task_id > 0:
        server.join()

    # Otherwise we are the main task, let's define the devices
    devices = [
        "/job:mesh/task:%d/device:GPU:%d" % (i, j)
        for i in range(cluster_spec.num_tasks("mesh"))
        for j in range(FLAGS.gpus_per_task)
    ]
    print("List of devices", devices)

    mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
        mesh_shape, layout_rules, devices)

    ##Begin here
    klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)

    final = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0128_S0100_05step/mesh/d/'
    )
    ic = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0128_S0100_05step/mesh/s/'
    )

    pypath = '/global/cscratch1/sd/chmodi/cosmo4d/output/version2/L0400_N0128_05step-fof/lhd_S0100/n10/opt_s999_iM12-sm3v25off/meshes/'
    fin = tools.readbigfile(pypath + 'decic//')

    hpos = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0512_S0100_40step/FOF/PeakPosition//'
    )[1:int(bs**3 * numd)]
    hmass = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0512_S0100_40step/FOF/Mass//'
    )[1:int(bs**3 * numd)].flatten()

    #meshpos = tools.paintcic(hpos, bs, nc)
    meshmass = tools.paintcic(hpos, bs, nc, hmass.flatten() * 1e10)
    data = meshmass
    kv = tools.fftk([nc, nc, nc], bs, symmetric=True, dtype=np.float32)
    datasm = tools.fingauss(data, kv, 3, np.pi * nc / bs)
    ic, data = np.expand_dims(ic, 0), np.expand_dims(data,
                                                     0).astype(np.float32)
    datasm = np.expand_dims(datasm, 0).astype(np.float32)
    print("Min in data : %0.4e" % datasm.min())

    # (ic and data already have a batch axis from the expand_dims call above)
    np.save(fpath + 'ic', ic)
    np.save(fpath + 'data', data)

    ####################################################
    tf.reset_default_graph()
    print('ic constructed')

    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")

    initial_conditions, data_field, loss, var_grads, update_op, linear_op, input_field, lr, R0, M0, width, chisq, prior, tf_off, tf_istd = recon_prototype(
        mesh, datasm, nc=FLAGS.nc, batch_size=FLAGS.batch_size, dtype=dtype)

    # Lower mesh computation

    start = time.time()
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    restore_hook = mtf.MtfRestoreHook(lowering)
    end = time.time()
    print('\n Time for lowering : %f \n' % (end - start))

    tf_initc = lowering.export_to_tf_tensor(initial_conditions)
    tf_data = lowering.export_to_tf_tensor(data_field)
    tf_chisq = lowering.export_to_tf_tensor(chisq)
    tf_prior = lowering.export_to_tf_tensor(prior)
    tf_grads = lowering.export_to_tf_tensor(var_grads[0])
    #tf_lr = lowering.export_to_tf_tensor(lr)
    tf_linear_op = lowering.lowered_operation(linear_op)
    tf_update_ops = lowering.lowered_operation(update_op)
    n_block_x, n_block_y, n_block_z = FLAGS.nx, FLAGS.ny, 1
    nc = FLAGS.nc

    with tf.Session(server.target) as sess:

        start = time.time()
        sess.run(tf_linear_op, feed_dict={input_field: ic})
        ic_check, data_check = sess.run([tf_initc, tf_data], {width: 3})

        dg.saveimfig('-check', [ic_check, data_check], [ic, data],
                     fpath + '/figs/')
        dg.save2ptfig('-check', [ic_check, data_check], [ic, data],
                      fpath + '/figs/', bs)
        print('Total time taken for mesh thingy is : ', time.time() - start)

        sess.run(tf_linear_op,
                 feed_dict={
                     input_field:
                     np.random.normal(size=ic.size).reshape(ic.shape)
                 })
        ic0, data0 = sess.run([tf_initc, tf_data], {width: 3})
        dg.saveimfig('-init', [ic0, data0], [ic, data], fpath)
        start = time.time()

        titer = 20
        niter = 101
        iiter = 0

        start0 = time.time()
        RRs = [4, 2, 1, 0.5, 0]
        wws = [1, 2, 3]
        lrs = np.array([0.1, 0.1, 0.1, 0.1, 0.1]) * 2
        #lrs = [0.1, 0.05, 0.01, 0.005, 0.001]

        readin = True
        mm0, ww0, RR0 = 1e12, 3, 0.5
        if readin:
            icread = np.load(fpath + '/figs-M%02d-R%02d-w%01d/ic_recon.npy' %
                             (np.log10(mm0), 10 * RR0, ww0))
            sess.run(tf_linear_op, feed_dict={input_field: icread})

        for mm in [1e12, 1e11]:
            print('Fraction of points above 1 for mm = %0.2e: ' % mm,
                  (datasm > mm).sum() / datasm.size)
            noisefile = '/project/projectdirs/m3058/chmodi/cosmo4d/train/L0400_N0128_05step-n10/width_3/Wts_30_10_1/r1rf1/hlim-13_nreg-43_batch-5/eluWts-10_5_1/blim-20_nreg-23_batch-100/hist_M%d_na.txt' % (
                np.log10(mm) * 10)
            offset, ivar = setnoise(datasm, noisefile, noisevar=0.25)
            for iR, zlR in enumerate(zip(RRs, lrs)):
                RR, lR = zlR
                for ww in wws:
                    for ff in [
                            fpath + '/figs-M%02d-R%02d-w%01d' %
                        (np.log10(mm), 10 * RR, ww)
                    ]:
                        try:
                            os.makedirs(ff)
                        except Exception as e:
                            print(e)
                    if readin:
                        if mm > mm0: continue
                        elif mm == mm0 and RR > RR0:
                            print(RR, RR0, RRs)
                            continue
                        elif RR == RR0 and ww <= ww0:
                            print(ww, ww0, wws)
                            continue
                        else:
                            print('Starting from %0.2e' % mm, RR, ww)
                    print('Do for %0.2e' % mm, RR, ww)

                    for i in range(niters[iR]):  # niters is presumably defined at module scope for the first pass; it is redefined near the end of this loop
                        iiter += 1
                        sess.run(
                            tf_update_ops, {
                                lr: lR,
                                M0: mm,
                                R0: RR,
                                width: ww,
                                tf_off: offset,
                                tf_istd: ivar**0.5
                            })
                        if (i % titer == 0):
                            end = time.time()
                            print('Iter : ', i)
                            print('Time taken for %d iterations: ' % titer,
                                  end - start)
                            start = end

                            ##
                            ic1, data1, cc, pp = sess.run(
                                [tf_initc, tf_data, tf_chisq, tf_prior], {
                                    M0: mm,
                                    R0: RR,
                                    width: ww,
                                    tf_off: offset,
                                    tf_istd: ivar**0.5
                                })
                            print('Chisq and prior are : ', cc, pp)

                            dg.saveimfig(i, [ic1, data1], [ic, data], ff)
                            dg.save2ptfig(i, [ic1, data1], [ic, data], ff, bs)

                    ic1, data1 = sess.run([tf_initc, tf_data], {width: ww})
                    np.save(ff + '/ic_recon', ic1)
                    np.save(ff + '/data_recon', data1)
                    dg.saveimfig(iiter, [ic1, data1], [ic, data],
                                 fpath + '/figs')
                    dg.save2ptfig(iiter, [ic1, data1], [ic, data],
                                  fpath + '/figs', bs)

            wws = [3]
            RRs = [0]
            niters = [201, 101, 201]
            lrs = np.array([0.1, 0.1, 0.1])

        ic1, data1 = sess.run([tf_initc, tf_data], {width: 3})
        print('Total time taken for %d iterations is : ' % iiter,
              time.time() - start0)

    dg.saveimfig('', [ic1, data1], [ic, data], fpath)
    dg.save2ptfig('', [ic1, data1], [ic, data], fpath, bs)

    np.save(fpath + 'ic_recon', ic1)
    np.save(fpath + 'data_recon', data1)
    print('Total wallclock time is : ', time.time() - start0)

    ##
    exit(0)
Code example #10
def main(_):

    infield = True
    dtype = tf.float32
    mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)
    nc, bs = FLAGS.nc, FLAGS.box_size
    a0, a, nsteps = FLAGS.a0, FLAGS.af, FLAGS.nsteps
    stages = np.linspace(a0, a, nsteps, endpoint=True)
    numd = 1e-3

    ##Begin here
    klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)

    #pypath = '/global/cscratch1/sd/chmodi/cosmo4d/output/version2/L0400_N0128_05step-fof/lhd_S0100/n10/opt_s999_iM12-sm3v25off/meshes/'
    final = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0128_S0100_05step/mesh/d/'
    )
    ic = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0128_S0100_05step/mesh/s/'
    )
    fpos = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0128_S0100_05step/dynamic/1/Position/'
    )
    aa = 1
    zz = 1 / aa - 1
    rsdfactor = float(100 / (aa**2 * cosmo.H(zz).value**1))
    print('\nRsdfactor used is : ', rsdfactor)

    hpos = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0512_S0100_40step/FOF/PeakPosition//'
    )[1:int(bs**3 * numd)]
    hvel = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0512_S0100_40step/FOF/CMVelocity//'
    )[1:int(bs**3 * numd)]
    rsdpos = hpos + hvel * rsdfactor * np.array([0, 0, 1])
    print('Effective displacement : ', (hvel[:, -1] * rsdfactor).std())
    hmass = tools.readbigfile(
        '/project/projectdirs/m3058/chmodi/cosmo4d/data/L0400_N0512_S0100_40step/FOF/Mass//'
    )[1:int(bs**3 * numd)].flatten()

    meshpos = tools.paintcic(rsdpos, bs, nc)
    meshmass = tools.paintcic(rsdpos, bs, nc, hmass.flatten() * 1e10)
    data = meshmass
    kv = tools.fftk([nc, nc, nc], bs, symmetric=True, dtype=np.float32)
    datasm = tools.fingauss(data, kv, 3, np.pi * nc / bs)
    ic, data = np.expand_dims(ic, 0), np.expand_dims(data,
                                                     0).astype(np.float32)
    datasm = np.expand_dims(datasm, 0).astype(np.float32)
    print("Min in data : %0.4e" % datasm.min())

    #

    ####################################################

    stdinit = srecon.standardinit(bs, nc, meshpos, hpos, final, R=8)
    recon_estimator = tf.estimator.Estimator(model_fn=model_fn,
                                             model_dir=fpath)

    def predict_input_fn(data=data,
                         M0=0.,
                         w=3.,
                         R0=0.,
                         off=None,
                         istd=None,
                         x0=None):
        features = {}
        features['datasm'] = data
        features['rsdfactor'] = rsdfactor
        features['M0'] = M0
        features['w'] = w
        features['R0'] = R0
        features['off'] = off
        features['istd'] = istd
        features['x0'] = x0
        return features, None

    eval_results = recon_estimator.predict(
        input_fn=lambda: predict_input_fn(x0=ic), yield_single_examples=False)

    for i, pred in enumerate(eval_results):
        if i > 0: break

    suff = '-model'
    dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data],
                 fpath + '/figs/')
    dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data],
                  fpath + '/figs/', bs)
    np.save(fpath + '/reconmeshes/ic_true' + suff, pred['ic'])
    np.save(fpath + '/reconmeshes/fin_true' + suff, pred['final'])
    np.save(fpath + '/reconmeshes/model_true' + suff, pred['model'])

    #
    randominit = np.random.normal(size=data.size).reshape(data.shape)
    #eval_results = recon_estimator.predict(input_fn=lambda : predict_input_fn(x0 = np.expand_dims(stdinit, 0)), yield_single_examples=False)
    eval_results = recon_estimator.predict(
        input_fn=lambda: predict_input_fn(x0=randominit),
        yield_single_examples=False)

    for i, pred in enumerate(eval_results):
        if i > 0: break

    suff = '-init'
    dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data],
                 fpath + '/figs/')
    dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data],
                  fpath + '/figs/', bs)
    np.save(fpath + '/reconmeshes/ic_init' + suff, pred['ic'])
    np.save(fpath + '/reconmeshes/fin_init' + suff, pred['final'])
    np.save(fpath + '/reconmeshes/model_init' + suff, pred['model'])

    #
    # Train and evaluate model.
    mms = [1e12, 1e11]
    wws = [1., 2., 3.]
    RRs = [4., 2., 1., 0.5, 0.]
    niter = 100
    iiter = 0

    for mm in mms:

        noisefile = '/project/projectdirs/m3058/chmodi/cosmo4d/train/L0400_N0128_05step-n10/width_3/Wts_30_10_1/r1rf1/hlim-13_nreg-43_batch-5/eluWts-10_5_1/blim-20_nreg-23_batch-100/hist_M%d_na.txt' % (
            np.log10(mm) * 10)
        offset, ivar = setnoise(datasm, noisefile, noisevar=0.25)
        istd = ivar**0.5
        if not FLAGS.offset: offset = None
        if not FLAGS.istd: istd = None

        for R0 in RRs:

            for ww in wws:

                print('\nFor iteration %d\n' % iiter)
                print('With mm=%0.2e, R0=%0.2f, ww=%d \n' % (mm, R0, ww))

                def train_input_fn():
                    features = {}
                    features['datasm'] = datasm
                    features['rsdfactor'] = rsdfactor
                    features['M0'] = mm
                    features['w'] = ww
                    features['R0'] = R0
                    features['off'] = offset
                    features['istd'] = istd
                    features['x0'] = np.expand_dims(
                        stdinit, 0
                    )  #np.random.normal(size=datasm.size).reshape(datasm.shape)
                    features['lr'] = 0.01
                    return features, None

                recon_estimator.train(input_fn=train_input_fn,
                                      max_steps=iiter + niter)
                eval_results = recon_estimator.predict(
                    input_fn=predict_input_fn, yield_single_examples=False)

                for i, pred in enumerate(eval_results):
                    if i > 0: break

                iiter += niter  #
                suff = '-%d-M%d-R%d-w%d' % (iiter, np.log10(mm), R0, ww)
                dg.saveimfig(suff, [pred['ic'], pred['model']], [ic, data],
                             fpath + '/figs/')
                dg.save2ptfig(suff, [pred['ic'], pred['model']], [ic, data],
                              fpath + '/figs/', bs)
                suff = '-M%d-R%d-w%d' % (np.log10(mm), R0, ww)
                np.save(fpath + '/reconmeshes/ic' + suff, pred['ic'])
                np.save(fpath + '/reconmeshes/fin' + suff, pred['final'])
                np.save(fpath + '/reconmeshes/model' + suff, pred['model'])

        RRs = [1., 0.5, 0.]
        wws = [3.]
        niter = 200

    sys.exit(0)

    ##
    exit(0)