Example No. 1
def train_step(inputs):
    x_true, y = inputs
    if args.stdinit:
        x_init = y[:, 1] / b1eul
        if args.diffps:
            x_init = x_init + linear_field(
                nc, bs, ipkdiff, batch_size=y.shape[0])
    elif args.priorinit:
        x_init = linear_field(nc, bs, ipklin, batch_size=y.shape[0])
    else:
        x_init = tf.random.normal(x_true.shape)
    y = y[:, 0]
    if len(rim.trainable_variables) == 0:
        #Hack since sometimes this is the first time RIM is called and so hasn't been initialized
        i = 0
        a, b, c = x_init[i:i + 1], y[i:i + 1], x_true[i:i + 1]
        _ = rim(tf.constant(a), tf.constant(b), grad_fn, tf.constant(c),
                grad_params)[1] / args.batch_size

    #
    gradients = [0.] * len(rim.trainable_variables)
    #n = args.sims_in_loop
    for i in range(args.batch_size // world_size):
        with tf.GradientTape() as tape:
            a, b, c = x_init[i:i + 1], y[i:i + 1], x_true[i:i + 1]
            loss = rim(tf.constant(a), tf.constant(b), grad_fn, tf.constant(c),
                       grad_params)[1] / args.batch_size
        grads = tape.gradient(loss, rim.trainable_variables)
        for j in range(len(grads)):
            gradients[j] = gradients[j] + grads[j]
    optimizer.apply_gradients(zip(gradients, rim.trainable_variables))
    return loss
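A sketch of how a per-replica train_step like the one above is typically driven under tf.distribute; `dataset` (yielding (x_true, y) batches) and the strategy choice are assumptions, not part of the original example.

import tensorflow as tf

# Hypothetical driver loop (a sketch; `dataset` is an assumption).
strategy = tf.distribute.MirroredStrategy()
world_size = strategy.num_replicas_in_sync  # matches the global used above

@tf.function
def distributed_train_step(inputs):
    # Run the replica-local step and sum the per-replica losses.
    per_replica_loss = strategy.run(train_step, args=(inputs,))
    return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_loss,
                           axis=None)

dist_dataset = strategy.experimental_distribute_dataset(dataset)
for batch in dist_dataset:
    loss = distributed_train_step(batch)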
Example No. 2
def test_step(inputs):
    x_true, y = inputs
    #x_init = tf.random.normal(x_true.shape)
    if args.stdinit:
        x_init = y[:, 1] / b1eul + linear_field(
            nc, bs, ipkdiff, batch_size=y.shape[0])
        #x_init = y[:, 1]
        y = y[:, 0]
    elif args.priorinit:
        x_init = linear_field(nc, bs, ipklin, batch_size=y.shape[0])
    else:
        x_init = tf.random.normal(x_true.shape)
    x_pred = rim(x_init, y, grad_fn, grad_params)
    return x_pred, x_init, x_true, y
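A minimal evaluation sketch for the test_step above; `test_batch` is an assumption, and the MSE comparison assumes x_pred comes back with the same shape as x_true.

import tensorflow as tf

# Hypothetical usage; `test_batch` holds one (x_true, y) pair.
x_pred, x_init, x_true, y = test_step(test_batch)
print("init MSE:", tf.reduce_mean(tf.square(x_true - x_init)).numpy())
print("pred MSE:", tf.reduce_mean(tf.square(x_true - x_pred)).numpy())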
Example No. 3
def train_step(inputs):
    x_true, y = inputs
    if len(rim.trainable_variables) == 0:
        #Hack since sometimes this is the first time RIM is called and so hasn't been initialized
        i = 0
        b, c = y[i:i+1],  x_true[i:i+1]
        if args.stdinit:
            a = b[:, 1] / b1eul 
            if args.diffps : a = a + linear_field(nc, bs, ipkdiff, batch_size=b.shape[0])
        else: a = tf.random.normal(c.shape)
        b = b[:, 0]
        try: a, b, c = tf.constant(a), tf.constant(b),  tf.constant(c)
        except: pass
        _ =  rim(a, b, grad_fn, c, grad_params)[1] / args.batch_size
    #
    gradients = [0.]*len(rim.trainable_variables)
    for i in range(args.batch_size // world_size):
        with tf.GradientTape() as tape:
            b, c = y[i:i+1],  x_true[i:i+1]
            a = tf.random.normal(c.shape)
            b = b[:, 0]
            try: a, b, c = tf.constant(a), tf.constant(b),  tf.constant(c)
            except: pass
            loss =  rim(a, b, grad_fn, c, grad_params)[1] / args.batch_size
        grads = tape.gradient(loss, rim.trainable_variables)
        for j in range(len(grads)):
            gradients[j] = gradients[j] + grads[j] 
    optimizer.apply_gradients(zip(gradients, rim.trainable_variables))
    return loss
Example No. 4
def check_2pt(xx, yy, rim, grad_fn, compares, nrim=10, fname=None):
    truemesh = [xx[0], yy[0]]
    rimpreds = []
    for it in range(nrim):
        x_init = flowpm.linear_field(nc, bs, ipklin,
                                     batch_size=xx.shape[0]).numpy()
        #x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)
        #x_init = (yy - (yy.max() - yy.min())/2.)/yy.std() + np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)
        pred = rim(tf.constant(x_init), tf.constant(yy), grad_fn)[-1]
        rimpreds.append([pred[0].numpy(), pm(pred)[0].numpy()])

    fig, ax = plt.subplots(1, 2, figsize=(9, 4), sharex=True)
    for ip, preds in enumerate(rimpreds):
        k, pks = tools.get_ps(preds, truemesh, bs)
        for i in range(2):
            lbl = None
            if ip == 0 and i == 0: lbl = 'Linear'
            if ip == 0 and i == 1: lbl = 'Final'
            ax[0].plot(k,
                       pks[i][2] / (pks[i][0] * pks[i][1])**0.5,
                       'C%d-' % i,
                       alpha=0.4,
                       label=lbl)
            ax[1].plot(k, (pks[i][0] / pks[i][1])**0.5, 'C%d-' % i, alpha=0.4)

    lss = ['-', '--', ':', '-.']
    lws = [1, 1, 2, 2]
    lbls = ['Adam', 'Adam 10x', 'Best recon']
    #for ip, preds in enumerate([pred_adam, pred_adam10]):
    for ip, preds in enumerate(compares):
        k, pks = tools.get_ps(preds, truemesh, bs)
        for i in range(2):
            lbl = None
            if i == 0: lbl = lbls[ip]
            ax[0].plot(k,
                       pks[i][2] / (pks[i][0] * pks[i][1])**0.5,
                       'C%d' % i,
                       ls=lss[ip + 1],
                       lw=lws[ip + 1])
            ax[1].plot(k, (pks[i][0] / pks[i][1])**0.5,
                       'C%d' % i,
                       label=lbl,
                       ls=lss[ip + 1],
                       lw=lws[ip + 1])

    for axis in ax:
        axis.semilogx()
        axis.grid(which='both')
        axis.legend(fontsize=12)
        axis.set_xlabel('k(h/Mpc)', fontsize=12)
    ax[0].set_ylim(-0.1, 1.2)
    ax[1].set_ylim(-0.5, 2.0)
    ax[0].set_ylabel('$r_c$', fontsize=12)
    ax[1].set_ylabel('$t_f$', fontsize=12)
    plt.tight_layout()
    if fname is not None: plt.savefig(fname)
    else: plt.savefig('rim-2pt.png')
    plt.close()
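For reference, the two statistics plotted above, factored into a standalone helper; the argument names mirror pks[i][0], pks[i][1] (the two auto-spectra) and pks[i][2] (the cross-spectrum).

def cross_corr_and_transfer(pk_a, pk_b, pk_ab):
    """The two panels of check_2pt: cross-correlation and transfer function."""
    r_c = pk_ab / (pk_a * pk_b)**0.5  # cross-correlation coefficient, left panel
    t_f = (pk_a / pk_b)**0.5          # transfer function, right panel
    return r_c, t_f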
Example No. 5
def test_step(inputs):
    x_true, y = inputs
    if args.stdinit:
        x_init = y[:, 1] / b1eul 
        if args.diffps : x_init = x_init + linear_field(nc, bs, ipkdiff, batch_size=y.shape[0])
    else: x_init = tf.random.normal(x_true.shape)
    y = y[:, 0]
    x_pred = rim(x_init, y, grad_fn, x_true, grad_params)[0]
    return x_pred, x_init, x_true, y
def test_save_state():
    """
  Tests the BigFile saving function
  """
    klin = np.loadtxt('flowpm/data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('flowpm/data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)
    a0 = 0.1
    nc = [16, 16, 16]
    boxsize = [100., 100., 100.]
    cosmo = flowpm.cosmology.Planck15()

    initial_conditions = flowpm.linear_field(
        nc,  # size of the cube
        boxsize,  # Physical size of the cube
        ipklin,  # Initial powerspectrum
        batch_size=2)

    # Sample particles
    state = flowpm.lpt_init(cosmo, initial_conditions, a0)

    with tempfile.TemporaryDirectory() as tmpdirname:
        filename = tmpdirname + '/testsave'
        save_state(cosmo, state, a0, nc, boxsize, filename)

        # Now try to reload the information using BigFile
        bf = bigfile.BigFile(filename)

        # Testing recovery of header
        header = bf['Header']
        assert_allclose(np.array(header.attrs['NC']), np.array(nc))
        assert_allclose(np.array(header.attrs['BoxSize']), np.array(boxsize))
        assert_allclose(np.array(header.attrs['OmegaCDM']),
                        np.array(cosmo.Omega_c))
        assert_allclose(np.array(header.attrs['OmegaB']),
                        np.array(cosmo.Omega_b))
        assert_allclose(np.array(header.attrs['OmegaK']),
                        np.array(cosmo.Omega_k))
        assert_allclose(np.array(header.attrs['h']), np.array(cosmo.h))
        assert_allclose(np.array(header.attrs['Sigma8']),
                        np.array(cosmo.sigma8))
        assert_allclose(np.array(header.attrs['w0']), np.array(cosmo.w0))
        assert_allclose(np.array(header.attrs['wa']), np.array(cosmo.wa))
        assert_allclose(np.array(header.attrs['Time']), np.array(a0))

        # Testing recovery of data
        pos = bf['1/Position']
        assert_allclose(pos[:], state[0, 1].numpy() / nc[0] * boxsize[0])
        vel = bf['1/Velocity']
        assert_allclose(vel[:], state[1, 1].numpy() / nc[0] * boxsize[0])

        # Closing file
        bf.close()
Example No. 7
    def pm_data_test(self, dummy):
        args = self.args
        nc, bs = args.nc, args.bs

        print("PM graph")
        linear = flowpm.linear_field(nc,
                                     bs,
                                     args.ipklin,
                                     batch_size=args.world_size)
        base = self.pm(linear)
        sample = self.gal_sample(base)
        return linear, sample
Example No. 8
def pm_data_test(dummy):
    print("PM graph")
    linear = flowpm.linear_field(nc, bs, ipklin, batch_size=world_size)
    if args.nbody:
        print('Nbody sim')
        state = lpt_init(linear, a0=a0, order=args.lpt_order)
        final_state = nbody(state, stages, nc)
    else:
        print('ZA/2LPT sim')
        final_state = lpt_init(linear, a0=af, order=args.lpt_order)
    tfinal_field = cic_paint(tf.zeros_like(linear), final_state[0])
    return linear, tfinal_field
Example No. 9
def test_step(inputs):
    x_true, y = inputs
    #x_init = tf.random.normal(x_true.shape)
    if args.stdinit:
        x_init = y[:, 1] / b1eul + linear_field(
            nc, bs, ipkdiff, batch_size=y.shape[0])
        #x_init = y[:, 1]
        y = y[:, 0]
    else:
        print('data init')
        x_init = y
    x_pred = cnn(tf.expand_dims(x_init, -1))[..., 0]
    #x_pred = cnn(x_init)
    return x_pred, x_init, x_true, y
Example No. 10
def train_step(inputs):
    x_true, y = inputs
    #x_init = tf.random.normal(x_true.shape)
    if args.stdinit:
        x_init = y[:, 1] / b1eul + linear_field(
            nc, bs, ipkdiff, batch_size=y.shape[0])
        #x_init = y[:, 1]
        y = y[:, 0]
    elif args.priorinit:
        x_init = linear_field(nc, bs, ipklin, batch_size=y.shape[0])
    else:
        x_init = tf.random.normal(x_true.shape)
    with tf.GradientTape() as tape:
        x_pred = rim(x_init, y, grad_fn, grad_params)
        res = (x_true - x_pred)
        #print(res.shape)
        loss = tf.reduce_mean(
            tf.square(res),
            axis=(0, 2, 3, 4))  ##This is not advised, come back to this
        loss = tf.reduce_sum(loss) / args.batch_size
    gradients = tape.gradient(loss, rim.trainable_variables)
    optimizer.apply_gradients(zip(gradients, rim.trainable_variables))
    print(optimizer._decayed_lr(tf.float32))
    return loss
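The reduction flagged as "not advised" above is, when axis 1 of res is the batch axis with length args.batch_size, numerically the same as a plain mean over all elements; a simpler equivalent form, under that shape assumption:

# Equivalent loss, assuming res has shape [steps, batch, nc, nc, nc] and
# the batch axis has length args.batch_size (an assumption from the code above).
loss = tf.reduce_mean(tf.square(x_true - x_pred))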
def main(_):
    cosmology = flowpm.cosmology.Planck15()

    # Compute the k vector that will be needed in the PGD fit
    k, _ = flowpm.power_spectrum(tf.zeros([1] + [FLAGS.nc] * 3),
                                 boxsize=np.array([FLAGS.box_size] * 3),
                                 kmin=np.pi / FLAGS.box_size,
                                 dk=2 * np.pi / FLAGS.box_size)

    # Create some initial conditions
    klin = tf.constant(np.logspace(-4, 1, 512), dtype=tf.float32)
    pk = linear_matter_power(cosmology, klin)
    pk_fun = lambda x: tf.cast(
        tf.reshape(
            interpolate.interp_tf(tf.reshape(tf.cast(x, tf.float32), [-1]),
                                  klin, pk), x.shape), tf.complex64)

    initial_conditions = flowpm.linear_field(
        [FLAGS.nc, FLAGS.nc, FLAGS.nc],
        [FLAGS.box_size, FLAGS.box_size, FLAGS.box_size],
        pk_fun,
        batch_size=FLAGS.batch_size)

    initial_state = flowpm.lpt_init(cosmology, initial_conditions,
                                    FLAGS.a_init)
    stages = np.linspace(FLAGS.a_init, 1., FLAGS.nsteps, endpoint=True)

    print('Starting simulation')
    state, scale_factors, pgdparams = fit_nbody(cosmology,
                                                initial_state,
                                                stages,
                                                [FLAGS.nc, FLAGS.nc, FLAGS.nc],
                                                pm_nc_factor=FLAGS.B)
    print('Simulation done')

    pickle.dump(
        {
            'B': FLAGS.B,
            'nsteps': FLAGS.nsteps,
            'params': pgdparams,
            'scale_factors': scale_factors,
            'cosmology': cosmology.to_dict(),
            'boxsize': FLAGS.box_size,
            'nc': FLAGS.nc
        }, open(FLAGS.filename, "wb"))
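Loading the fitted parameters back is symmetric to the dump above; a short sketch where 'pgd_fit.pkl' stands in for FLAGS.filename.

import pickle

# Sketch of reading the dump above; the key layout follows the dict written there.
with open('pgd_fit.pkl', 'rb') as f:  # FLAGS.filename in the original
    fit = pickle.load(f)
print(fit['nsteps'], fit['B'], fit['scale_factors'])
pgdparams = fit['params']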
Example No. 12
def pm_poisson():
    print("PM graph")
    linear = flowpm.linear_field(nc, bs, ipklin, batch_size=batch_size)
    if args.nbody:
        print('Nbody sim')
        state = lpt_init(linear, a0=a0, order=args.lpt_order)
        final_state = nbody(state,  stages, nc)
    else:
        print('ZA/2LPT sim')
        final_state = lpt_init(linear, a0=af, order=args.lpt_order)
    tfinal_field = cic_paint(tf.zeros_like(linear), final_state[0])
    base = tfinal_field
    if Rsm != 0:
         basek = r2c3d(tfinal_field, norm=nc**3)
         basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
         base = c2r3d(basek, norm=nc**3)

    galmean = tfp.distributions.Poisson(rate = plambda * (1 + base))
    result = galmean.sample()
    return linear, tfinal_field, result, base
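`kmesh` and `smwts` are not defined in this snippet; a plausible definition of the Gaussian smoothing weights, mirroring the smoothing that appears commented out in Example No. 18, would be:

# Assumed smoothing weights (matches the commented-out block in Example No. 18);
# `kmesh` is the |k| grid for the box.
Rsmsq = (Rsm * bs / nc)**2
smwts = tf.exp(-kmesh**2 * Rsmsq)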
Example No. 13
def train_step(inputs):
    x_true, y = inputs
    #x_init = tf.random.normal(x_true.shape)
    if args.stdinit:
        print('std init')
        x_init = y[:, 1] / b1eul + linear_field(
            nc, bs, ipkdiff, batch_size=y.shape[0])
        #x_init = y[:, 1]
        y = y[:, 0]
    else:
        print('data init')
        x_init = y
    with tf.GradientTape() as tape:
        x_pred = cnn(tf.expand_dims(x_init, -1))[..., 0]
        print(x_pred.shape)
        res = (x_true - x_pred)
        #print(res.shape)
        loss = tf.reduce_mean(tf.square(res))
    gradients = tape.gradient(loss, cnn.trainable_variables)
    optimizer.apply_gradients(zip(gradients, cnn.trainable_variables))
    print("learnign rate : ", optimizer._decayed_lr(tf.float32))
    print('Len trainable : ', len(cnn.trainable_variables))
    return loss
Example No. 14
else:
    ofolder = './models/L%04d_N%03d/LPT%d%s/' % (bs, nc, args.lpt_order,
                                                 suffpath)
try:
    os.makedirs(ofolder)
except Exception as e:
    print(e)

#######################################

x_test, y_test = testdata[0:1, 0], testdata[0:1, 1:]
x_test = tf.constant(x_test, dtype=tf.float32)
if args.stdinit:
    x_init = tf.constant(y_test[:, 1] / b1eul, dtype=tf.float32)
    if args.diffps:
        x_init = x_init + linear_field(
            nc, bs, ipkdiff, batch_size=y_test.shape[0])
elif args.priorinit:
    x_init = linear_field(nc, bs, ipklin, batch_size=y_test.shape[0])
else:
    x_init = tf.random.normal(x_test.shape)
y_test = tf.constant(y_test[:, 0])
minic, minfin = datamodel.reconstruct(tf.constant(y_test),
                                      bias,
                                      errormesh,
                                      RRs=RRs,
                                      niter=args.rim_iter * 20,
                                      lr=0.5,
                                      x_init=x_init,
                                      useprior=True)
pred_adam, _ = datamodel.reconstruct(tf.constant(y_test),
                                     bias,
Example No. 15
def main(_):

    mesh_shape = [("row", 2), ("col", 2)]
    layout_rules = [("nx_lr", "row"), ("ny_lr", "col"), ("nx", "row"),
                    ("ny", "col"), ("ty_lr", "row"), ("tz_lr", "col"),
                    ("nx_block", "row"), ("ny_block", "col")]

    mesh_hosts = ["localhost:%d" % (8222 + j) for j in range(4)]

    # Create a cluster from the mesh hosts.
    cluster = tf.train.ClusterSpec({
        "mesh": mesh_hosts,
        "master": ["localhost:8488"]
    })

    # Create a server for local mesh members
    server = tf.train.Server(cluster, job_name="master", task_index=0)

    mesh_devices = [
        '/job:mesh/task:%d' % i for i in range(cluster.num_tasks("mesh"))
    ]
    print("List of devices", mesh_devices)
    mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
        mesh_shape, layout_rules, mesh_devices)

    # Build the model

    # Create computational graphs and some initializations

    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "nbody_mesh")

    # Compute a few things first, using simple tensorflow
    a0 = FLAGS.a0
    a = FLAGS.af
    nsteps = FLAGS.nsteps
    bs, nc = FLAGS.box_size, FLAGS.nc
    klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)
    stages = np.linspace(a0, a, nsteps, endpoint=True)

    #pt = PerturbationGrowth(cosmology, a=[a], a_normalize=1.0)
    # Generate a batch of 3D initial conditions
    initial_conditions = flowpm.linear_field(
        FLAGS.nc,  # size of the cube
        FLAGS.box_size,  # Physical size of the cube
        ipklin,  # Initial power spectrum
        batch_size=FLAGS.batch_size)

    state = lpt_init(initial_conditions, a0=a0, order=1)
    #final_state = state
    final_state = nbody(state, stages, nc)
    tfinal_field = cic_paint(tf.zeros_like(initial_conditions), final_state[0])

    # Compute necessary Fourier kernels
    kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)
    from flowpm.kernels import laplace_kernel, gradient_kernel
    lap = tf.cast(laplace_kernel(kvec), tf.complex64)
    grad_x = gradient_kernel(kvec, 0)
    grad_y = gradient_kernel(kvec, 1)
    grad_z = gradient_kernel(kvec, 2)
    derivs = [lap, grad_x, grad_y, grad_z]

    mesh_final_field = lpt_prototype(mesh,
                                     initial_conditions,
                                     derivs,
                                     bs=FLAGS.box_size,
                                     nc=FLAGS.nc,
                                     batch_size=FLAGS.batch_size)
    # Lower mesh computation
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})

    # Retrieve output of computation
    result = lowering.export_to_tf_tensor(mesh_final_field)

    with tf.Session(server.target,
                    config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        a, b, c = sess.run([initial_conditions, tfinal_field, result])
    np.save('init', a)
    np.save('reference_final', b)
    np.save('mesh_pyramid', c)

    plt.figure(figsize=(15, 3))
    plt.subplot(141)
    plt.imshow(a[0].sum(axis=2))
    plt.title('Initial Conditions')

    plt.subplot(142)
    plt.imshow(b[0].sum(axis=2))
    plt.title('TensorFlow (single GPU)')
    plt.colorbar()

    plt.subplot(143)
    plt.imshow(c[0].sum(axis=2))
    plt.title('Mesh TensorFlow Single')
    plt.colorbar()

    plt.subplot(144)
    plt.imshow((b[0] - c[0]).sum(axis=2))
    plt.title('Residuals')
    plt.colorbar()

    plt.savefig("comparison-single.png")

    exit(0)
Example No. 16
def test_convergence_Born(return_results=False):
    """ This function tests that given a set of density planes, 
  both lenstools and flowpm recover the same convergence maps in
  angular coordinates.
  """
    klin = np.loadtxt(data_path + '/flowpm/data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt(data_path + '/flowpm/data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)

    cosmo = flowpm.cosmology.Planck15()

    a0 = 0.9

    # Create a state vector
    initial_conditions = flowpm.linear_field([nc, nc, 10 * nc],
                                             [bs, bs, 10 * bs],
                                             ipklin,
                                             batch_size=2)
    state = flowpm.lpt_init(cosmo, initial_conditions, a0)

    r = tf.linspace(0., 10 * bs, 11)
    r_center = 0.5 * (r[1:] + r[:-1])
    a_center = flowpm.background.a_of_chi(cosmo, r_center)

    constant_factor = 3 / 2 * cosmo.Omega_m * (constants.H0 / constants.c)**2

    # To make it convenient to access simulation properties in lenstools
    # let's quickly export and reload the sim
    # TODO: remove the need for this!
    flowpm.io.save_state(cosmo,
                         state,
                         a0, [nc, nc, 10 * nc], [bs, bs, 10 * bs],
                         'snapshot_born_testing',
                         attrs={'comoving_distance': r_center[0]})

    # Reload the snapshot with lenstools
    snapshot = FlowPMSnapshot.open('snapshot_born_testing')

    # Get some density planes and create lenstool tracer
    lensplanes = []
    tracer = lt.simulations.RayTracer(lens_type=lt.simulations.DensityPlane)
    for i in range(len(r_center)):
        plane = flowpm.raytracing.density_plane(
            state, [nc, nc, 10 * nc],
            r_center[i] / bs * nc,
            width=nc,
            plane_resolution=plane_resolution)
        r, a, p = r_center[i], a_center[i], plane[0]
        lensplanes.append((r, a, plane))

        density_normalization = bs * r / a

        # We upsample the lensplanes before giving them to lenstools because
        # lenstools uses a weird kind of interpolation when converting from
        # comoving coordinates to angular coordinates; a larger grid mitigates this.
        p = tf.image.resize(
            tf.reshape(p, [1, plane_resolution, plane_resolution, 1]),
            [2048, 2048])

        p = (p[0, :, :, 0] * constant_factor * density_normalization).numpy()
        p = p - np.mean(p)
        lt_plane = lt.simulations.DensityPlane(
            p,
            angle=snapshot.header["box_size"],
            redshift=1 / a - 1,
            cosmology=snapshot.cosmology)
        tracer.addLens(lt_plane)

    # Adding dummy lensplanes at the end
    tracer.addLens(
        lt.simulations.DensityPlane(np.zeros((2048, 2048)),
                                    angle=snapshot.header["box_size"],
                                    redshift=0.99,
                                    cosmology=snapshot.cosmology))
    tracer.addLens(
        lt.simulations.DensityPlane(np.zeros((2048, 2048)),
                                    angle=snapshot.header["box_size"],
                                    redshift=2,
                                    cosmology=snapshot.cosmology))
    tracer.reorderLenses()

    # Create an array of coordinates at which to retrieve the convergence maps
    xgrid, ygrid = np.meshgrid(
        np.linspace(0, field, npix, endpoint=False),  # range of X coordinates
        np.linspace(0, field, npix, endpoint=False))  # range of Y coordinates

    coords = np.stack([xgrid, ygrid], axis=0) * u.deg
    c = coords.reshape([2, -1]).T

    # Compute convergence map with lenstools
    lt_map = tracer.convergenceBorn(coords, z=1.0)

    # Compute convergence map with flowpm
    fpm_map = flowpm.raytracing.convergenceBorn(cosmo,
                                                lensplanes,
                                                bs / nc,
                                                bs,
                                                c.to(u.rad),
                                                z_source=tf.ones([1]),
                                                field_npix=npix)

    # Comparing the final maps
    assert_allclose(lt_map,
                    fpm_map[0].numpy().reshape([npix, npix, -1])[:, :, -1],
                    atol=5e-4)

    if return_results:
        return lt_map, fpm_map
Example No. 17
def main():
    """
    Model function for the CosmicRIM.
    """

    if args.parallel: rim = build_rim_parallel(params)
    else: rim = build_rim_split(params)
    grad_fn = recon_dm_grad
    #

    #
    #    train_dataset = tf.data.Dataset.range(args.batch_in_epoch)
    #    train_dataset = train_dataset.map(pm_data)
    #    # dset = dset.apply(tf.data.experimental.unbatch())
    #    train_dataset = train_dataset.prefetch(-1)
    #    test_dataset = tf.data.Dataset.range(1).map(pm_data_test).prefetch(-1)
    #
    traindata, testdata = get_data()
    idx = np.random.randint(0, traindata.shape[0], 1)
    xx = traindata[idx, 0].astype(np.float32)
    yy = traindata[idx, 1].astype(np.float32)
    x_init = flowpm.linear_field(nc, bs, ipklin, batch_size=xx.shape[0])
    #x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)
    x_pred = rim(x_init, yy, grad_fn)

    #
    # @tf.function
    def rim_train(x_true, x_init, y):

        with tf.GradientTape() as tape:
            x_pred = rim(x_init, y, grad_fn)
            res = (x_true - x_pred)
            loss = tf.reduce_mean(tf.square(res))
        gradients = tape.gradient(loss, rim.trainable_variables)
        return loss, gradients

    ##Train and save
    piter, testiter = 10, 50
    losses = []
    lrs = [0.0001, 0.0001]
    liters = [1001, 2001]
    trainiter = 0
    start = time.time()
    x_test, y_test = None, None

    for il in range(len(lrs)):
        print('Learning rate = %0.3e' % lrs[il])
        opt = tf.keras.optimizers.Adam(learning_rate=lrs[il])

        for i in range(liters[il]):
            idx = np.random.randint(0, traindata.shape[0], args.batch_size)
            xx = traindata[idx, 0].astype(np.float32)
            yy = traindata[idx, 1].astype(np.float32)
            x_init = flowpm.linear_field(nc,
                                         bs,
                                         ipklin,
                                         batch_size=xx.shape[0]).numpy()
            #x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)
            #x_init = (yy - (yy.max() - yy.min())/2.)/yy.std() + np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)

            loss, gradients = rim_train(x_true=tf.constant(xx),
                                        x_init=tf.constant(x_init),
                                        y=tf.constant(yy))

            losses.append(loss.numpy())
            opt.apply_gradients(zip(gradients, rim.trainable_variables))

            if i % piter == 0:
                print("Time taken for %d iterations : " % piter,
                      time.time() - start)
                print("Loss at iteration %d : " % i, losses[-1])
                start = time.time()
            if i % testiter == 0:
                plt.plot(losses)
                plt.savefig(ofolder + 'losses.png')
                plt.close()

                ##check 2pt and compare to Adam
                #idx = np.random.randint(0, testdata.shape[0], 1)
                #xx, yy = testdata[idx, 0].astype(np.float32), testdata[idx, 1].astype(np.float32),
                if x_test is None:
                    idx = np.random.randint(0, testdata.shape[0], 1)
                    x_test = testdata[idx, 0].astype(np.float32)
                    y_test = testdata[idx, 1].astype(np.float32)
                    x_init = flowpm.linear_field(
                        nc, bs, ipklin, batch_size=x_test.shape[0]).numpy()
                    pred_adam = adam(tf.constant(x_init), tf.constant(y_test),
                                     grad_fn)
                    pred_adam = [
                        pred_adam[0].numpy(),
                        pm(pred_adam)[0].numpy()
                    ]
                    pred_adam10 = adam10(tf.constant(x_init),
                                         tf.constant(y_test), grad_fn)
                    pred_adam10 = [
                        pred_adam10[0].numpy(),
                        pm(pred_adam10)[0].numpy()
                    ]
                    minic, minfin = fid_recon.reconstruct(tf.constant(y_test),
                                                          RRs=[1.0, 0.0],
                                                          niter=args.rim_iter * 10,
                                                          lr=0.1)
                    compares = [pred_adam, pred_adam10, [minic[0], minfin[0]]]
                    print('Test set generated')

                #x_init = np.random.normal(size=x_test.size).reshape(x_test.shape).astype(np.float32)
                x_init = flowpm.linear_field(
                    nc, bs, ipklin, batch_size=x_test.shape[0]).numpy()
                #x_init = (y_test - (y_test.max() - y_test.min())/2.)/y_test.std() + np.random.normal(size=x_test.size).reshape(x_test.shape).astype(np.float32)
                pred = rim(tf.constant(x_init), tf.constant(y_test),
                           grad_fn)[-1]
                check_im(x_test[0],
                         x_init[0],
                         pred.numpy()[0],
                         fname=ofolder + 'rim-im-%04d.png' % trainiter)
                check_2pt(x_test,
                          y_test,
                          rim,
                          grad_fn,
                          compares,
                          fname=ofolder + 'rim-2pt-%04d.png' % trainiter)

                rim.save_weights(ofolder + '/%d' % trainiter)

            trainiter += 1
Example No. 18
def main(_):

    dtype=tf.float32

    startw = time.time()

    tf.random.set_random_seed(100)
    np.random.seed(100)

    
    # Compute a few things first, using simple tensorflow
    a0=FLAGS.a0
    a=FLAGS.af
    nsteps=FLAGS.nsteps
    bs, nc = FLAGS.box_size, FLAGS.nc
    klin = np.loadtxt('../data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)
    stages = np.linspace(a0, a, nsteps, endpoint=True)

    tf.reset_default_graph()
    # Run normal flowpm to generate data
    try:
        ic, fin = np.load(fpath + 'ic.npy'), np.load(fpath + 'final.npy')
        print('Data loaded')
    except Exception as e:
        print('Exception occurred', e)
        tfic = linear_field(FLAGS.nc, FLAGS.box_size, ipklin, batch_size=1, seed=100, dtype=dtype)
        if FLAGS.nbody:
            state = lpt_init(tfic, a0=0.1, order=1)
            final_state = nbody(state,  stages, FLAGS.nc)
        else:
            final_state = lpt_init(tfic, a0=stages[-1], order=1)
        tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])
        with tf.Session() as sess:
            ic, fin  = sess.run([tfic, tfinal_field])
        np.save(fpath + 'ic', ic)
        np.save(fpath + 'final', fin)


    ################################################################
    tf.reset_default_graph()
    print('ic constructed')

    noise = np.random.normal(0, 1, nc**3).reshape(fin.shape).astype(np.float32)
    data_noised = fin + noise
    data = data_noised

    minimum = data.copy()
    start = noise.copy().flatten().astype(np.float32)



    Rsm = tf.placeholder(tf.float32, name='smoothing')
    def recon_prototype(linear):
        """
        """
        
        linear = tf.reshape(linear, minimum.shape)
        #loss = tf.reduce_sum(tf.square(linear - minimum)) 

        state = lpt_init(linear, a0=0.1, order=1)
        final_state = nbody(state,  stages, FLAGS.nc)
        final_field = cic_paint(tf.zeros_like(linear), final_state[0])

        residual = final_field - data.astype(np.float32)
        base = residual
##        Rsmsq = tf.multiply(Rsm*bs/nc, Rsm*bs/nc)
##        smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
##        basek = r2c3d(base, norm=nc**3)
##        basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
##        base = c2r3d(basek, norm=nc**3)   
####    #
        chisq = tf.multiply(base, base)
        chisq = tf.reduce_sum(chisq)
        chisq = tf.multiply(chisq, 1/nc**3, name='chisq')

        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1/priorwt))
        prior = tf.multiply(prior, 1/nc**3, name='prior')
        #
        loss = chisq + prior

        return loss


    @tf.function
    def min_lbfgs():
      return tfp.optimizer.lbfgs_minimize(
          make_val_and_grad_fn(recon_prototype),
          initial_position=tf.constant(start),
          tolerance=1e-5,
          max_iterations=50)

    with tf.Session() as sess:
        #results = sess.run(min_lbfgs(), {Rsm:4})
        results = sess.run(min_lbfgs())
    print(results)
    minimum = results.position
    print(minimum)

    tf.reset_default_graph()
    print('\nminimized\n')

    tfic = linear_field(FLAGS.nc, FLAGS.box_size, ipklin, batch_size=1, seed=100, dtype=dtype)*0 + minimum.reshape(data_noised.shape)
    state = lpt_init(tfic, a0=0.1, order=1)
    final_state = nbody(state,  stages, FLAGS.nc)
    tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])
    with tf.Session() as sess:
        minic, minfin  = sess.run([tfic, tfinal_field])

    dg.saveimfig(0, [minic, minfin], [ic, fin], fpath+'')
    dg.save2ptfig(0, [minic, minfin], [ic, fin], fpath+'', bs)
    
    
##
    exit(0)
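`make_val_and_grad_fn` is referenced above but not defined in this snippet; a minimal version in the style of the TensorFlow Probability L-BFGS examples (an assumption about the original helper):

import functools
import tensorflow_probability as tfp

def make_val_and_grad_fn(value_fn):
    """Wrap a scalar loss so lbfgs_minimize receives (value, gradient) pairs."""
    @functools.wraps(value_fn)
    def val_and_grad(x):
        # tfp.math.value_and_gradient returns the loss and its gradient w.r.t. x.
        return tfp.math.value_and_gradient(value_fn, x)
    return val_and_grad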
Example No. 19
def test_density_plane(return_results=False):
    """ Tests cutting density planes from snapshots against lenstools
  """
    klin = np.loadtxt(data_path + '/flowpm/data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt(data_path + '/flowpm/data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)

    cosmo = flowpm.cosmology.Planck15()

    a0 = 0.9
    r0 = flowpm.background.rad_comoving_distance(cosmo, a0)

    # Create a state vector
    initial_conditions = flowpm.linear_field(nc, bs, ipklin, batch_size=2)
    state = flowpm.lpt_init(cosmo, initial_conditions, a0)

    # Export the snapshot
    flowpm.io.save_state(cosmo,
                         state,
                         a0, [nc, nc, nc], [bs, bs, bs],
                         'snapshot_density_testing',
                         attrs={'comoving_distance': r0})

    # Reload the snapshot with lenstools
    snapshot = FlowPMSnapshot.open('snapshot_density_testing')

    # Cut a lensplane in the middle of the volume
    lt_plane, resolution, NumPart = snapshot.cutPlaneGaussianGrid(
        normal=2,
        plane_resolution=plane_resolution,
        center=(bs / 2) * snapshot.Mpc_over_h,
        thickness=(bs / 4) * snapshot.Mpc_over_h,
        left_corner=np.zeros(3) * snapshot.Mpc_over_h,
        smooth=None,
        kind='density')

    # Cut the same lensplane with flowpm
    fpm_plane = flowpm.raytracing.density_plane(
        state,
        nc,
        center=nc / 2,
        width=nc / 4,
        plane_resolution=plane_resolution)

    # Apply additional normalization terms to match lenstools definitions
    constant_factor = 3 / 2 * cosmo.Omega_m * (constants.H0 / constants.c)**2
    density_normalization = bs / 4 * r0 / a0
    fpm_plane = fpm_plane * density_normalization * constant_factor

    # Checking first the mean value, which accounts for any normalization
    # issues
    assert_allclose(np.mean(fpm_plane[0]), np.mean(lt_plane), rtol=1e-5)

    # To check pixelwise difference, we need to do some smoothing as lenstools and
    # flowpm use different painting kernels
    smooth_lt_plane = np.fft.ifft2(fourier_gaussian(np.fft.fft2(lt_plane),
                                                    3)).real
    smooth_fpm_plane = np.fft.ifft2(
        fourier_gaussian(np.fft.fft2(fpm_plane[0]), 3)).real

    assert_allclose(smooth_fpm_plane, smooth_lt_plane, rtol=2e-2)

    if return_results:
        return fpm_plane, lt_plane, smooth_fpm_plane, smooth_lt_plane
Example No. 20
bs, nc = FLAGS.box_size, FLAGS.nc
klin = np.loadtxt('../data/Planck15_a1p00.txt').T[0]
plin = np.loadtxt('../data/Planck15_a1p00.txt').T[1]
ipklin = iuspline(klin, plin)
stages = np.linspace(a0, a, nsteps, endpoint=True)

#tf.reset_default_graph()
# Run normal flowpm to generate data
try:
    ic, fin = np.load(fpath + 'ic.npy'), np.load(fpath + 'final.npy')
    print('Data loaded')
except Exception as e:
    print('Exception occurred', e)
    tfic = linear_field(FLAGS.nc,
                        FLAGS.box_size,
                        ipklin,
                        batch_size=1,
                        seed=100,
                        dtype=dtype)
    if FLAGS.nbody:
        state = lpt_init(tfic, a0=0.1, order=1)
        final_state = nbody(state, stages, FLAGS.nc)
    else:
        final_state = lpt_init(tfic, a0=stages[-1], order=1)
    tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])
    with tf.Session() as sess:
        ic, fin = sess.run([tfic, tfinal_field])
    np.save(fpath + 'ic', ic)
    np.save(fpath + 'final', fin)

################################################################
#tf.reset_default_graph()
Example No. 21
def main(_):

    infield = True
    mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)

    startw = time.time()

    print(mesh_shape)

    #layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
    #mesh_shape = [("row", FLAGS.nx), ("col", FLAGS.ny)]
    layout_rules = [("nx_lr", "row"), ("ny_lr", "col"), ("nx", "row"),
                    ("ny", "col"), ("ty", "row"), ("tz", "col"),
                    ("ty_lr", "row"), ("tz_lr", "col"), ("nx_block", "row"),
                    ("ny_block", "col")]

    # Resolve the cluster from SLURM environment
    cluster = tf.distribute.cluster_resolver.SlurmClusterResolver(
        {"mesh": mesh_shape.size // FLAGS.gpus_per_task},
        port_base=8822,
        gpus_per_node=FLAGS.gpus_per_node,
        gpus_per_task=FLAGS.gpus_per_task,
        tasks_per_node=FLAGS.tasks_per_node)
    cluster_spec = cluster.cluster_spec()
    print(cluster_spec)
    # Create a server for all mesh members
    server = tf.distribute.Server(cluster_spec, "mesh", cluster.task_id)
    print(server)

    if cluster.task_id > 0:
        server.join()

    # Otherwise we are the main task, let's define the devices
    devices = [
        "/job:mesh/task:%d/device:GPU:%d" % (i, j)
        for i in range(cluster_spec.num_tasks("mesh"))
        for j in range(FLAGS.gpus_per_task)
    ]
    print("List of devices", devices)

    mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
        mesh_shape, layout_rules, devices)

    ##Begin here
    klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)

    #If infield, run normal flowpm to generate data
    tf.reset_default_graph()
    if infield:
        tfic = linear_field(FLAGS.nc,
                            FLAGS.box_size,
                            ipklin,
                            batch_size=1,
                            seed=100)
        if FLAGS.nbody:
            state = lpt_init(tfic, a0=0.1, order=1)
            final_state = nbody(state, stages, FLAGS.nc)

        else:
            final_state = lpt_init(tfic, a0=stages[-1], order=1)
        tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])

        start = time.time()
        with tf.Session(server.target) as sess:
            ic, fin = sess.run([tfic, tfinal_field])
        print("\nTime taken for the vanilla flowpm thingy :\n ",
              time.time() - start)

    else:
        ic = None

    tf.reset_default_graph()
    print('ic constructed')

    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")

    initial_conditions, final_field, input_field = nbody_prototype(
        mesh, infield, nc=FLAGS.nc, batch_size=FLAGS.batch_size)

    # Lower mesh computation

    start = time.time()
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    restore_hook = mtf.MtfRestoreHook(lowering)
    end = time.time()
    print('\n Time for lowering : %f \n' % (end - start))

    tf_initc = lowering.export_to_tf_tensor(initial_conditions)
    tf_final = lowering.export_to_tf_tensor(final_field)
    nc = FLAGS.nc

    with tf.Session(server.target) as sess:

        start = time.time()
        if infield:
            ic_check, fin_check = sess.run([tf_initc, tf_final],
                                           feed_dict={input_field: ic})
        else:
            ic_check, fin_check = sess.run([tf_initc, tf_final])
            ic, fin = ic_check, fin_check
        print('\n Time for the mesh run : %f \n' % (time.time() - start))

    plt.figure(figsize=(15, 3))
    plt.subplot(141)
    plt.imshow(ic_check[0].sum(axis=2))
    plt.title('Initial Conditions')

    plt.subplot(142)
    plt.imshow(fin[0].sum(axis=2))
    plt.title('TensorFlow (single GPU)')
    plt.colorbar()

    plt.subplot(143)
    plt.imshow(fin_check[0].sum(axis=2))
    plt.title('Mesh TensorFlow')
    plt.colorbar()

    plt.subplot(144)
    plt.imshow((fin_check[0] - fin[0]).sum(axis=2))
    plt.title('Residuals')
    plt.colorbar()

    plt.savefig("comparison_mesh.png")

    exit(0)

    ##
    exit(0)
def main(_):
    cosmology = flowpm.cosmology.Planck15()
    # Create a simple Planck15 cosmology without neutrinos, and make sure
    # sigma8 is matched
    nbdykit_cosmo = Cosmology.from_astropy(Planck15.clone(m_nu=0 * u.eV))
    nbdykit_cosmo = nbdykit_cosmo.match(sigma8=cosmology.sigma8.numpy())

    # Compute the k vector that will be needed in the PGD fit
    k, _ = flowpm.power_spectrum(tf.zeros([1] + [FLAGS.nc] * 3),
                                 boxsize=np.array([FLAGS.box_size] * 3),
                                 kmin=np.pi / FLAGS.box_size,
                                 dk=2 * np.pi / FLAGS.box_size)

    # Create some initial conditions
    klin = tf.constant(np.logspace(-4, 1, 512), dtype=tf.float32)
    pk = linear_matter_power(cosmology, klin)
    pk_fun = lambda x: tf.cast(
        tf.reshape(
            interpolate.interp_tf(tf.reshape(tf.cast(x, tf.float32), [-1]),
                                  klin, pk), x.shape), tf.complex64)

    initial_conditions = flowpm.linear_field(
        [FLAGS.nc, FLAGS.nc, FLAGS.nc],
        [FLAGS.box_size, FLAGS.box_size, FLAGS.box_size],
        pk_fun,
        batch_size=FLAGS.batch_size)

    initial_state = flowpm.lpt_init(cosmology, initial_conditions,
                                    FLAGS.a_init)
    stages = np.linspace(FLAGS.a_init, 1., FLAGS.nsteps, endpoint=True)

    print('Starting simulation')
    # Run the Nbody
    states = flowpm.nbody(cosmology,
                          initial_state,
                          stages, [FLAGS.nc, FLAGS.nc, FLAGS.nc],
                          pm_nc_factor=FLAGS.B,
                          return_intermediate_states=True)
    print('Simulation done')

    # Initialize PGD params
    alpha = tf.Variable([FLAGS.alpha0], dtype=tf.float32)
    scales = tf.Variable([FLAGS.kl0, FLAGS.ks0], dtype=tf.float32)
    optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)

    params = []
    scale_factors = []
    # We begin by fitting the last time step
    for j, (a, state) in enumerate(states[::-1]):
        # Let's compute the target power spectrum at that scale factor
        target_pk = HalofitPower(nbdykit_cosmo,
                                 1. / a - 1.)(k).astype('float32')

        for i in range(FLAGS.niter if j == 0 else FLAGS.niter_refine):
            optimizer.minimize(
                partial(pgd_loss, alpha, scales, state, target_pk), [alpha] if
                (FLAGS.fix_scales and j > 0) else [alpha, scales])

            if i % 10 == 0:
                loss, pk = pgd_loss(alpha,
                                    scales,
                                    state,
                                    target_pk,
                                    return_pk=True)
                if i == 0:
                    pk0 = pk
                print("step %d, loss:" % i, loss)
        params.append(np.concatenate([alpha.numpy(), scales.numpy()]))
        scale_factors.append(a)
        print("Fitted params (alpha, kl, ks)", params[-1])

        plt.loglog(k, target_pk, "k")
        plt.loglog(k, pk0, ':', label='starting')
        plt.loglog(k, pk, '--', label='after n steps')
        plt.grid(which='both')
        plt.savefig('PGD_fit_%0.2f.png' % a)
        plt.close()

    pickle.dump(
        {
            'B': FLAGS.B,
            'nsteps': FLAGS.nsteps,
            'params': params,
            'scale_factors': scale_factors,
            'cosmology': cosmology.to_dict(),
            'boxsize': FLAGS.box_size,
            'nc': FLAGS.nc
        }, open(FLAGS.filename, "wb"))
Example No. 23
def benchmark_model(mesh):
  """
  Initializes a 3D volume with random noise, and executes a forward FFT
  """
  # Setup parameters
  bs = FLAGS.box_size
  nc = FLAGS.cube_size
  batch_size = FLAGS.batch_size
  a0 = FLAGS.a0
  a = 1.0
  nsteps = FLAGS.pm_steps

  # Compute a few things first, using simple tensorflow
  klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
  plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
  ipklin = iuspline(klin, plin)
  stages = np.linspace(a0, a, nsteps, endpoint=True)

  # Initialize the integration steps
  stages = np.linspace(FLAGS.a0, 1.0, FLAGS.pm_steps, endpoint=True)

  # Generate a batch of 3D initial conditions
  initial_conditions = flowpm.linear_field(
      nc,  # size of the cube
      bs,  # Physical size of the cube
      ipklin,  # Initial power spectrum
      batch_size=batch_size)

  # Compute necessary Fourier kernels
  kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)
  from flowpm.kernels import laplace_kernel, gradient_kernel
  lap = tf.cast(laplace_kernel(kvec), tf.complex64)
  grad_x = gradient_kernel(kvec, 0)
  grad_y = gradient_kernel(kvec, 1)
  grad_z = gradient_kernel(kvec, 2)

  # Define the named dimensions
  # Parameters of the small scales decomposition
  n_block_x = 8
  n_block_y = 4
  n_block_z = 1
  halo_size = 4

  # Parameters of the large scales decomposition
  downsampling_factor = 2
  lnc = nc // 2**downsampling_factor

  fx_dim = mtf.Dimension("nx", nc)
  fy_dim = mtf.Dimension("ny", nc)
  fz_dim = mtf.Dimension("nz", nc)

  tfx_dim = mtf.Dimension("tx", nc)
  tfy_dim = mtf.Dimension("ty", nc)
  tfz_dim = mtf.Dimension("tz", nc)

  # Dimensions of the low resolution grid
  tx_dim = mtf.Dimension("tx_lr", nc)
  ty_dim = mtf.Dimension("ty_lr", nc)
  tz_dim = mtf.Dimension("tz_lr", nc)

  nx_dim = mtf.Dimension('nx_block', n_block_x)
  ny_dim = mtf.Dimension('ny_block', n_block_y)
  nz_dim = mtf.Dimension('nz_block', n_block_z)

  sx_dim = mtf.Dimension('sx_block', nc // n_block_x)
  sy_dim = mtf.Dimension('sy_block', nc // n_block_y)
  sz_dim = mtf.Dimension('sz_block', nc // n_block_z)

  batch_dim = mtf.Dimension("batch", batch_size)
  pk_dim = mtf.Dimension("npk", len(plin))
  pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])

  # Compute necessary Fourier kernels
  kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)
  kx = mtf.import_tf_tensor(mesh,
                            kvec[0].squeeze().astype('float32'),
                            shape=[tfx_dim])
  ky = mtf.import_tf_tensor(mesh,
                            kvec[1].squeeze().astype('float32'),
                            shape=[tfy_dim])
  kz = mtf.import_tf_tensor(mesh,
                            kvec[2].squeeze().astype('float32'),
                            shape=[tfz_dim])
  kv = [ky, kz, kx]

  kvec_lr = flowpm.kernels.fftk([nc, nc, nc], symmetric=False)
  kx_lr = mtf.import_tf_tensor(mesh,
                               kvec_lr[0].squeeze().astype('float32'),
                               shape=[tx_dim])
  ky_lr = mtf.import_tf_tensor(mesh,
                               kvec_lr[1].squeeze().astype('float32'),
                               shape=[ty_dim])
  kz_lr = mtf.import_tf_tensor(mesh,
                               kvec_lr[2].squeeze().astype('float32'),
                               shape=[tz_dim])
  kv_lr = [ky_lr, kz_lr, kx_lr]

  # kvec for high resolution blocks
  shape = [batch_dim, fx_dim, fy_dim, fz_dim]
  lr_shape = [batch_dim, fx_dim, fy_dim, fz_dim]
  hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]
  part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]

  initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)
  state = mtfpm.lpt_init_single(
      initc,
      a0,
      kv_lr,
      halo_size,
      lr_shape,
      hr_shape,
      part_shape[1:],
      antialias=True,
  )
  #state = mtfpm.lpt_init(low, high, 0.1, kv_lr, kv_hr, halo_size, hr_shape, lr_shape,
  #                       part_shape[1:], downsampling_factor=downsampling_factor, antialias=True,)

  # Here we can run our nbody
  final_state = state  #mtfpm.nbody(state, stages, lr_shape, hr_shape, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)

  # paint the field
  final_field = mtf.zeros(mesh, shape=hr_shape)
  for block_size_dim in hr_shape[-3:]:
    final_field = mtf.pad(final_field, [halo_size, halo_size],
                          block_size_dim.name)
  final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)
  # Halo exchange
  for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):
    final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,
                                  halo_size)
  # Remove borders
  for block_size_dim in hr_shape[-3:]:
    final_field = mtf.slice(final_field, halo_size, block_size_dim.size,
                            block_size_dim.name)

  #final_field = mtf.reshape(final_field,  [batch_dim, fx_dim, fy_dim, fz_dim])
  # Hack using custom reshape because mesh is pretty dumb
  final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],
                              output_dtype=tf.float32,
                              output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],
                              name='my_dumb_reshape',
                              splittable_dims=part_shape[:-1] + hr_shape[:4])

  return mtf.reduce_sum(final_field)
Example No. 24
def main(_):

    dtype = tf.float32

    startw = time.time()

    tf.random.set_random_seed(100)
    np.random.seed(100)

    # Compute a few things first, using simple tensorflow
    a0 = FLAGS.a0
    a = FLAGS.af
    nsteps = FLAGS.nsteps
    bs, nc = FLAGS.box_size, FLAGS.nc
    klin = np.loadtxt('../data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)
    stages = np.linspace(a0, a, nsteps, endpoint=True)

    tf.reset_default_graph()
    # Run normal flowpm to generate data
    try:
        ic, fin = np.load(fpath + 'ic.npy'), np.load(fpath + 'final.npy')
        print('Data loaded')
    except Exception as e:
        print('Exception occurred', e)
        tfic = linear_field(FLAGS.nc,
                            FLAGS.box_size,
                            ipklin,
                            batch_size=1,
                            seed=100,
                            dtype=dtype)
        if FLAGS.nbody:
            state = lpt_init(tfic, a0=0.1, order=1)
            final_state = nbody(state, stages, FLAGS.nc)
        else:
            final_state = lpt_init(tfic, a0=stages[-1], order=1)
        tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])
        with tf.Session() as sess:
            ic, fin = sess.run([tfic, tfinal_field])
        np.save(fpath + 'ic', ic)
        np.save(fpath + 'final', fin)

    tf.reset_default_graph()
    print('ic constructed')

    linear, final_field, update_ops, loss, chisq, prior, Rsm = recon_prototype(
        fin)

    #initial_conditions = recon_prototype(mesh, fin, nc=FLAGS.nc,  batch_size=FLAGS.batch_size, dtype=dtype)

    # Lower mesh computation

    with tf.Session() as sess:

        #ic_check, fin_check = sess.run([tf_initc, tf_final])
        #sess.run(tf_linear_op, feed_dict={input_field:ic})
        #ic_check, fin_check = sess.run([linear, final_field])
        #dg.saveimfig('-check', [ic_check, fin_check], [ic, fin], fpath)
        #dg.save2ptfig('-check', [ic_check, fin_check], [ic, fin], fpath, bs)

        #sess.run(tf_linear_op, feed_dict={input_field:np.random.normal(size=ic.size).reshape(ic.shape)})
        sess.run(tf.global_variables_initializer())
        ic0, fin0 = sess.run([linear, final_field])
        dg.saveimfig('-init', [ic0, fin0], [ic, fin], fpath)
        start = time.time()

        titer = 20
        niter = 201
        iiter = 0

        start0 = time.time()
        RRs = [4, 2, 1, 0.5, 0]
        lrs = np.array([0.1, 0.1, 0.1, 0.1, 0.1]) * 2
        #lrs = [0.1, 0.05, 0.01, 0.005, 0.001]
        for iR, zlR in enumerate(zip(RRs, lrs)):
            RR, lR = zlR
            for ff in [fpath + '/figs-R%02d' % (10 * RR)]:
                try:
                    os.makedirs(ff)
                except Exception as e:
                    print(e)

            for i in range(niter):
                iiter += 1
                sess.run(update_ops, {Rsm: RR})
                print(sess.run([loss, chisq, prior], {Rsm: RR}))
                if (i % titer == 0):
                    end = time.time()
                    print('Iter : ', i)
                    print('Time taken for %d iterations: ' % titer,
                          end - start)
                    start = end

                    ##
                    #ic1, fin1, cc, pp = sess.run([tf_initc, tf_final, tf_chisq, tf_prior], {R0:RR})
                    #ic1, fin1, cc, pp = sess.run([tf_initc, tf_final, tf_chisq, tf_prior], {R0:RR})
                    ic1, fin1 = sess.run([linear, final_field])
                    #print('Chisq and prior are : ', cc, pp)

                    dg.saveimfig(i, [ic1, fin1], [ic, fin],
                                 fpath + '/figs-R%02d' % (10 * RR))
                    dg.save2ptfig(i, [ic1, fin1], [ic, fin],
                                  fpath + '/figs-R%02d' % (10 * RR), bs)
            dg.saveimfig(i * (iR + 1), [ic1, fin1], [ic, fin], fpath + '/figs')
            dg.save2ptfig(i * (iR + 1), [ic1, fin1], [ic, fin],
                          fpath + '/figs', bs)

        ic1, fin1 = sess.run([linear, final_field])
        print('Total time taken for %d iterations is : ' % iiter,
              time.time() - start0)

    dg.saveimfig(i, [ic1, fin1], [ic, fin], fpath)
    dg.save2ptfig(i, [ic1, fin1], [ic, fin], fpath, bs)

    np.save(fpath + 'ic_recon', ic1)
    np.save(fpath + 'final_recon', fin1)
    print('Total wallclock time is : ', time.time() - start0)

    ##
    exit(0)
Example No. 25
def main(_):

    mesh_shape = [("row", FLAGS.nx), ("col", FLAGS.ny)]
    layout_rules = [("nx_lr", "row"), ("ny_lr", "col"), ("nx", "row"),
                    ("ny", "col"), ("ty_lr", "row"), ("tz_lr", "col"),
                    ("nx_block", "row"), ("ny_block", "col")]

    mesh_impl = HvdSimdMeshImpl(mtf.convert_to_shape(mesh_shape),
                                mtf.convert_to_layout_rules(layout_rules))

    # Build the model

    # Create computational graphs and some initializations

    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "nbody_mesh")

    # Compute a few things first, using simple tensorflow
    a0 = FLAGS.a0
    a = FLAGS.af
    nsteps = FLAGS.nsteps
    bs, nc = FLAGS.box_size, FLAGS.nc
    klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]
    plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]
    ipklin = iuspline(klin, plin)
    stages = np.linspace(a0, a, nsteps, endpoint=True)

    #pt = PerturbationGrowth(cosmology, a=[a], a_normalize=1.0)
    # Generate a batch of 3D initial conditions
    initial_conditions = flowpm.linear_field(
        FLAGS.nc,  # size of the cube
        FLAGS.box_size,  # Physical size of the cube
        ipklin,  # Initial power spectrum
        batch_size=FLAGS.batch_size)
    cosmo = flowpm.cosmology.Planck15()
    state = lpt_init(cosmo, initial_conditions, a, order=1)
    final_state = state
    #final_state = nbody(cosmo, state, stages, nc)
    tfinal_field = cic_paint(tf.zeros_like(initial_conditions), final_state[0])

    # Compute necessary Fourier kernels
    kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)
    from flowpm.kernels import laplace_kernel, gradient_kernel
    lap = tf.cast(laplace_kernel(kvec), tf.complex64)
    grad_x = gradient_kernel(kvec, 0)
    grad_y = gradient_kernel(kvec, 1)
    grad_z = gradient_kernel(kvec, 2)
    derivs = [lap, grad_x, grad_y, grad_z]

    mesh_final_field = lpt_prototype(mesh,
                                     initial_conditions,
                                     derivs,
                                     bs=FLAGS.box_size,
                                     nc=FLAGS.nc,
                                     batch_size=FLAGS.batch_size)
    # Lower mesh computation
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})

    # Retrieve output of computation
    result = lowering.export_to_tf_tensor(mesh_final_field)

    with tf.Session() as sess:
        # Use fresh names so we don't shadow the scale factor `a` defined above;
        # `comm` below is assumed to be an MPI communicator (e.g. mpi4py's COMM_WORLD).
        ic_np, ref_np, mesh_np = sess.run([initial_conditions, tfinal_field, result])

    if comm.rank == 0:
        np.save('init', ic_np)
        np.save('reference_final', ref_np)
        np.save('mesh_pyramid', mesh_np)

        # Compare the projected density fields of the two implementations.
        plt.figure(figsize=(15, 3))
        plt.subplot(141)
        plt.imshow(ic_np[0].sum(axis=2))
        plt.title('Initial Conditions')

        plt.subplot(142)
        plt.imshow(ref_np[0].sum(axis=2))
        plt.title('TensorFlow (single GPU)')
        plt.colorbar()

        plt.subplot(143)
        plt.imshow(mesh_np[0].sum(axis=2))
        plt.title('Mesh TensorFlow Single')
        plt.colorbar()

        plt.subplot(144)
        plt.imshow((ref_np[0] - mesh_np[0]).sum(axis=2))
        plt.title('Residuals')
        plt.colorbar()

        plt.savefig("comparison-single.png")

    exit(0)
Example No. 26
def main(_):

    dtype = tf.float32
    mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)

    print(mesh_shape)

    #layout_rules = mtf.convert_to_layout_rules(FLAGS.layout)
    #mesh_shape = [("row", FLAGS.nx), ("col", FLAGS.ny)]
    layout_rules = [("nx_lr", "row"), ("ny_lr", "col"), ("nx", "row"),
                    ("ny", "col"), ("ty", "row"), ("tz", "col"),
                    ("ty_lr", "row"), ("tz_lr", "col"), ("nx_block", "row"),
                    ("ny_block", "col")]

    # Resolve the cluster from SLURM environment
    cluster = tf.distribute.cluster_resolver.SlurmClusterResolver(
        {"mesh": mesh_shape.size // FLAGS.gpus_per_task},
        port_base=8822,
        gpus_per_node=FLAGS.gpus_per_node,
        gpus_per_task=FLAGS.gpus_per_task,
        tasks_per_node=FLAGS.tasks_per_node)
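    # One TF server per SLURM task; every task joins the single "mesh" job and
    # only task 0 proceeds to build and run the graph.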
    cluster_spec = cluster.cluster_spec()
    print(cluster_spec)
    # Create a server for all mesh members
    server = tf.distribute.Server(cluster_spec, "mesh", cluster.task_id)
    print(server)

    if cluster.task_id > 0:
        server.join()

    # Otherwise we are the main task, let's define the devices
    devices = [
        "/job:mesh/task:%d/device:GPU:%d" % (i, j)
        for i in range(cluster_spec.num_tasks("mesh"))
        for j in range(FLAGS.gpus_per_task)
    ]
    print("List of devices", devices)

    mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
        mesh_shape, layout_rules, devices)

    ##Begin here
    klin, plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[:2]
    ipklin = iuspline(klin, plin)

    tf.reset_default_graph()
    # Run normal flowpm to generate data
    try:
        ic, fin = np.load(fpath + 'ic.npy'), np.load(fpath + 'final.npy')
        print('Data loaded')
    except Exception as e:
        print('Exception occurred:', e)
        tfic = linear_field(FLAGS.nc,
                            FLAGS.box_size,
                            ipklin,
                            batch_size=1,
                            seed=100,
                            dtype=dtype)
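        # `stages` (the scale-factor steps for the N-body integration) is
        # assumed to be defined at module level in this script.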
        if FLAGS.nbody:
            state = lpt_init(tfic, a0=0.1, order=1)
            final_state = nbody(state, stages, FLAGS.nc)
        else:
            final_state = lpt_init(tfic, a0=stages[-1], order=1)
        tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])
        with tf.Session(server.target) as sess:
            ic, fin = sess.run([tfic, tfinal_field])
        np.save(fpath + 'ic', ic)
        np.save(fpath + 'final', fin)

    tf.reset_default_graph()
    print('ic constructed')

    graph = mtf.Graph()
    mesh = mtf.Mesh(graph, "my_mesh")

    initial_conditions, final_field, loss, var_grads, update_op, linear_op, input_field, lr, R0 = recon_prototype(
        mesh, fin, nc=FLAGS.nc, batch_size=FLAGS.batch_size, dtype=dtype)

    # Lower mesh computation

    start = time.time()
    lowering = mtf.Lowering(graph, {mesh: mesh_impl})
    restore_hook = mtf.MtfRestoreHook(lowering)
    end = time.time()
    print('\n Time for lowering : %f \n' % (end - start))

    tf_initc = lowering.export_to_tf_tensor(initial_conditions)
    tf_final = lowering.export_to_tf_tensor(final_field)
    tf_grads = lowering.export_to_tf_tensor(var_grads[0])
    tf_linear_op = lowering.lowered_operation(linear_op)
    tf_update_ops = lowering.lowered_operation(update_op)
    n_block_x, n_block_y, n_block_z = FLAGS.nx, FLAGS.ny, 1
    nc = FLAGS.nc
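    # Split the cube into (nx, ny, 1) processor blocks: factor each axis into
    # (block index, within-block index), then move the block indices up front.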
    ic_hrshape = ic.reshape([
        FLAGS.batch_size, n_block_x, nc // n_block_x, n_block_y,
        nc // n_block_y, n_block_z, nc // n_block_z
    ])
    ic_hrshape = np.transpose(ic_hrshape, [0, 1, 3, 5, 2, 4, 6])
    with tf.Session(server.target) as sess:

        #ic_check, fin_check = sess.run([tf_initc, tf_final])
        sess.run(tf_linear_op, feed_dict={input_field: ic_hrshape})
        ic_check, fin_check = sess.run([tf_initc, tf_final])
        dg.saveimfig('-check', [ic_check, fin_check], [ic, fin], fpath)
        dg.save2ptfig('-check', [ic_check, fin_check], [ic, fin], fpath, bs)

        sess.run(tf_linear_op,
                 feed_dict={
                     input_field:
                     np.random.normal(size=ic.size).reshape(ic_hrshape.shape)
                 })
        ic0, fin0 = sess.run([tf_initc, tf_final])
        dg.saveimfig('-init', [ic0, fin0], [ic, fin], fpath)
        start = time.time()

        niter = 5
        iiter = 0
        start0 = time.time()
        RRs = [4, 2, 1, 0.5, 0]
        lrs = np.array([0.2, 0.15, 0.1, 0.1, 0.1])
        #lrs = [0.1, 0.05, 0.01, 0.005, 0.001]
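        # Anneal: start heavily smoothed (R=4) with a larger step size, then
        # sharpen the objective while decaying the learning rate down to R=0.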

        for iR, (RR, lR) in enumerate(zip(RRs, lrs)):
            #for ff in [fpath + '/figs-R%02d'%(10*RR)]:
            for ff in [fpath + '/figsiter']:
                os.makedirs(ff, exist_ok=True)
            for i in range(301):
                if (i % niter == 0):
                    end = time.time()
                    print('Iter : ', i)
                    print('Time taken for %d iterations: ' % niter,
                          end - start)
                    start = end
                    ##
                    ic1, fin1 = sess.run([tf_initc, tf_final])

                    #dg.saveimfig(i, [ic1, fin1], [ic, fin], fpath+'/figs-R%02d'%(10*RR))
                    #dg.save2ptfig(i, [ic1, fin1], [ic, fin], fpath+'/figs-R%02d'%(10*RR), bs)
                    dg.saveimfig2x2(iiter, [ic1, fin1], [ic, fin],
                                    fpath + '/figsiter')
                    #
                sess.run(tf_update_ops, {lr: lR, R0: RR})
                iiter += 1

            dg.saveimfig(i * (iR + 1), [ic1, fin1], [ic, fin], fpath + '/figs')
            dg.save2ptfig(i * (iR + 1), [ic1, fin1], [ic, fin],
                          fpath + '/figs', bs)

        ic1, fin1 = sess.run([tf_initc, tf_final])
        print('Total time taken for %d iterations: %.2f s' %
              (iiter, time.time() - start0))

    dg.saveimfig(i, [ic1, fin1], [ic, fin], fpath)
    dg.save2ptfig(i, [ic1, fin1], [ic, fin], fpath, bs)

    np.save(fpath + 'ic_recon', ic1)
    np.save(fpath + 'final_recon', fin1)
    print('Total wallclock time: %.2f s' % (time.time() - start0))

    ##
    exit(0)
Example No. 27
def main(_):

    dtype = tf.float32
    mesh_shape = mtf.convert_to_shape(FLAGS.mesh_shape)

    startw = time.time()

    print(mesh_shape)
    ##
    ##
    ##Begin here
    klin, plin = np.loadtxt('../data/Planck15_a1p00.txt').T[:2]
    ipklin = iuspline(klin, plin)

    tf.reset_default_graph()
    # Run normal flowpm to generate data
    try:
        ic, fin = np.load(fpath + 'ic.npy'), np.load(fpath + 'final.npy')
        print('Data loaded')
    except Exception as e:
        print('Exception occurred:', e)
        tfic = linear_field(FLAGS.nc,
                            FLAGS.box_size,
                            ipklin,
                            batch_size=1,
                            seed=100,
                            dtype=dtype)
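        # `stages` is assumed to be defined at module level (cf. the previous example).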
        if FLAGS.nbody:
            state = lpt_init(tfic, a0=0.1, order=1)
            final_state = nbody(state, stages, FLAGS.nc)
        else:
            final_state = lpt_init(tfic, a0=stages[-1], order=1)
        tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])
        with tf.Session() as sess:
            ic, fin = sess.run([tfic, tfinal_field])
        np.save(fpath + 'ic', ic)
        np.save(fpath + 'final', fin)

    print(ic.shape, fin.shape)
    ########################################################
    recon_estimator = tf.estimator.Estimator(model_fn=model_fn,
                                             model_dir=fpath)
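    # `model_fn` is assumed to be defined at module level; the estimator
    # checkpoints and restores the reconstruction state under `fpath`.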

    def eval_input_fn():
        features = {}
        features['data'] = fin
        features['R0'] = 0
        features['x0'] = None
        features['lr'] = 0
        return features, None

    # Train and evaluate model.

    RRs = [4., 2., 1., 0.5, 0.]
    niter = 200
    iiter = 0
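    # Anneal the smoothing scale R0 from 4 down to 0, retraining at each stage.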

    for R0 in RRs:
        print('\nFor iteration %d and R=%0.1f\n' % (iiter, R0))

        def train_input_fn():
            features = {}
            features['data'] = fin
            features['R0'] = R0
            features['x0'] = np.random.normal(size=fin.size).reshape(fin.shape)
            features['lr'] = 0.01
            return features, None

        recon_estimator.train(input_fn=train_input_fn, max_steps=iiter + niter)
        eval_results = recon_estimator.predict(input_fn=eval_input_fn,
                                               yield_single_examples=False)

        # Keep only the first (batched) prediction.
        pred = next(iter(eval_results))

        iiter += niter
        dg.saveimfig(iiter, [pred['ic'], pred['data']], [ic, fin],
                     fpath + '/figs/')
        dg.save2ptfig(iiter, [pred['ic'], pred['data']], [ic, fin],
                      fpath + '/figs/', bs)

    sys.exit(0)
Example No. 28
def main(_):

    dtype = tf.float32

    startw = time.time()

    tf.random.set_random_seed(100)
    np.random.seed(100)

    # Compute a few things first, using simple tensorflow
    a0 = FLAGS.a0
    a = FLAGS.af
    nsteps = FLAGS.nsteps
    bs, nc = FLAGS.box_size, FLAGS.nc
    klin, plin = np.loadtxt('../data/Planck15_a1p00.txt').T[:2]
    ipklin = iuspline(klin, plin)
    stages = np.linspace(a0, a, nsteps, endpoint=True)

    tf.reset_default_graph()
    # Run normal flowpm to generate data
    try:
        ic, fin = np.load(fpath + 'ic.npy'), np.load(fpath + 'final.npy')
        print('Data loaded')
    except Exception as e:
        print('Exception occurred:', e)
        tfic = linear_field(FLAGS.nc,
                            FLAGS.box_size,
                            ipklin,
                            batch_size=1,
                            seed=100,
                            dtype=dtype)
        if FLAGS.nbody:
            state = lpt_init(tfic, a0=0.1, order=1)
            final_state = nbody(state, stages, FLAGS.nc)
        else:
            final_state = lpt_init(tfic, a0=stages[-1], order=1)
        tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])
        with tf.Session() as sess:
            ic, fin = sess.run([tfic, tfinal_field])
        np.save(fpath + 'ic', ic)
        np.save(fpath + 'final', fin)

    k, pic = tools.power(ic[0] + 1, boxsize=bs)
    k, pfin = tools.power(fin[0], boxsize=bs)
    plt.plot(k, pic)
    plt.plot(k, pfin)
    plt.loglog()
    plt.grid(which='both')
    plt.savefig('pklin.png')
    plt.close()

    print(pic)
    print(pfin)
    #sys.exit(-1)

    ################################################################
    tf.reset_default_graph()
    print('ic constructed')

    noise = np.random.normal(0, 1, nc**3).reshape(fin.shape).astype(np.float32)
    data_noised = fin + noise
    data = data_noised

    startpos = noise.copy().flatten().astype(np.float32)

    x0 = tf.placeholder(dtype=tf.float32,
                        shape=data.flatten().shape,
                        name='initlin')
    Rsm = tf.placeholder(tf.float32, name='smoothing')
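    # `x0` feeds the LBFGS starting point; `Rsm` feeds the current smoothing scale.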

    def recon_prototype(linearflat):
        """
        """

        linear = tf.reshape(linearflat, data.shape)
        #

        #loss = tf.reduce_sum(tf.square(linear - minimum))

        state = lpt_init(linear, a0=0.1, order=1)
        final_state = nbody(state, stages, FLAGS.nc)
        final_field = cic_paint(tf.zeros_like(linear), final_state[0])

        residual = final_field - data.astype(np.float32)
        base = residual
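        # Smooth the residual with a Gaussian of width Rsm (grid units) in Fourier
        # space; `kmesh` is assumed to be a module-level grid of wavenumber magnitudes.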
        Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
        smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
        basek = r2c3d(base, norm=nc**3)
        basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
        base = c2r3d(basek, norm=nc**3)
        #
        chisq = tf.multiply(base, base)
        chisq = tf.reduce_sum(chisq)
        chisq = tf.multiply(chisq, 1 / nc**3, name='chisq')

        #Prior
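        # Gaussian prior on the linear modes; `priorwt` is assumed to be the
        # linear power spectrum evaluated on the k-grid.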
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
        prior = tf.multiply(prior, 1 / nc**3, name='prior')
        #
        loss = chisq + prior

        grad = tf.gradients(loss, linearflat)
        print(grad)
        return loss, grad[0]

    @tf.function
    def min_lbfgs():
        return tfp.optimizer.lbfgs_minimize(
            #make_val_and_grad_fn(recon_prototype),
            recon_prototype,
            initial_position=x0,
            tolerance=1e-10,
            max_iterations=100)

    with tf.Session() as sess:
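        # Anneal the smoothing scale R = 2 -> 1 -> 0, warm-starting each LBFGS
        # run from the previous stage's minimum.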
        start = time.time()
        results = sess.run(min_lbfgs(), {Rsm: 2, x0: startpos})
        print("\n")
        print(results)
        print("\n")
        minimum = results.position
        print(minimum)
        print("\nTime taken : ", time.time() - start)

        start = time.time()
        results = sess.run(min_lbfgs(), {Rsm: 1, x0: minimum})
        print("\n")
        print(results)
        minimum = results.position
        print("\n")
        print(minimum)
        print("\nTime taken : ", time.time() - start)

        start = time.time()
        results = sess.run(min_lbfgs(), {Rsm: 0, x0: minimum})
        print("\n")
        print(results)
        minimum = results.position
        print("\n")
        print(minimum)
        print("\nTime taken : ", time.time() - start)

    tf.reset_default_graph()
    print("\n")
    print('\nminimized\n')

    # Re-simulate from the recovered linear field; tf.constant replaces the
    # original `linear_field(...) * 0 + minimum` trick for building the tensor.
    tfic = tf.constant(minimum.reshape(data_noised.shape), dtype=dtype)
    state = lpt_init(tfic, a0=0.1, order=1)
    final_state = nbody(state, stages, FLAGS.nc)
    tfinal_field = cic_paint(tf.zeros_like(tfic), final_state[0])
    with tf.Session() as sess:
        minic, minfin = sess.run([tfic, tfinal_field])

    dg.saveimfig(0, [minic, minfin], [ic, fin], fpath + '')
    dg.save2ptfig(0, [minic, minfin], [ic, fin], fpath + '', bs)

    np.save(fpath + 'recon0ic', minic)
    np.save(fpath + 'recon-final', minfin)

    ##
    exit(0)
Example No. 29
def main():

    startw = time.time()

    # Run normal flowpm to generate data
    try:
        ic, fin = np.load(fpath + 'ic.npy'), np.load(fpath + 'final.npy')
        print('Data loaded')
    except Exception as e:
        print('Exception occurred:', e)
        tfic = linear_field(nc,
                            bs,
                            ipklin,
                            batch_size=1,
                            seed=100,
                            dtype=dtype)
        tfinal_field = pm(tfic)
        ic, fin = tfic.numpy(), tfinal_field.numpy()
        np.save(fpath + 'ic', ic)
        np.save(fpath + 'final', fin)

    print('\ndata constructed\n')

    noise = np.random.normal(0, 1, nc**3).reshape(fin.shape).astype(np.float32)
    data_noised = fin + noise
    data = data_noised
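    # `anneal`, `kmesh`, `priorwt`, `lr`, `RRs`, `niter`, and `optimizer` are
    # assumed to be configured at module level in this script.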

    @tf.function
    def recon_prototype(linear, Rsm):
        """
        """

        linear = tf.reshape(linear, data.shape)
        #loss = tf.reduce_sum(tf.square(linear - minimum))
        final_field = pm(linear)

        residual = final_field - data.astype(np.float32)
        base = residual

        if anneal:
            print("\nAdd annealing section to graph\n")
            Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
            smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
            basek = r2c3d(base, norm=nc**3)
            basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
            base = c2r3d(basek, norm=nc**3)

        chisq = tf.multiply(base, base)
        chisq = tf.reduce_sum(chisq)
        chisq = tf.multiply(chisq, 1 / nc**3, name='chisq')

        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
        prior = tf.multiply(prior, 1 / nc**3, name='prior')
        #
        loss = chisq + prior

        return loss

    @tf.function
    def val_and_grad(x, Rsm):
        # Return the loss and its gradient with respect to the linear field.
        with tf.GradientTape() as tape:
            tape.watch(x)
            loss = recon_prototype(x, Rsm)
        grad = tape.gradient(loss, x)
        return loss, grad

    @tf.function
    def grad(x, Rsm):
        with tf.GradientTape() as tape:
            tape.watch(x)
            loss = recon_prototype(x, Rsm)
        grad = tape.gradient(loss, x)
        return grad

    # Wrapper returning float64 (loss, grad) arrays for scipy's L-BFGS-B.
    def func(x, RR):
        return [
            vv.numpy().astype(np.float64)
            for vv in val_and_grad(x=tf.constant(x, dtype=tf.float32),
                                   Rsm=tf.constant(RR, dtype=tf.float32))
        ]  #

    # Create the Adam optimizer; `lr` is assumed to be set at module level.
    opt = tf.keras.optimizers.Adam(learning_rate=lr)

    ## Reconstruction loop
    x0 = np.random.normal(0, 1, nc**3).reshape(fin.shape).astype(np.float32)
    linear = tf.Variable(name='linmesh',
                         shape=(1, nc, nc, nc),
                         dtype=tf.float32,
                         initial_value=x0,
                         trainable=True)

    for iR, RR in enumerate(RRs):

        if optimizer == 'lbfgs':
            results = sopt.minimize(fun=func,
                                    x0=x0,
                                    args=RR,
                                    jac=True,
                                    method='L-BFGS-B',
                                    tol=1e-10,
                                    options={
                                        'maxiter': niter,
                                        'ftol': 1e-12,
                                        'gtol': 1e-12,
                                        'eps': 1e-12
                                    })
            #results = sopt.minimize(fun=func, x0=x0, args = RR, jac=True, method='L-BFGS-B',
            #                    options={'maxiter':niter})
            print(results)
            minic = results.x.reshape(data.shape)

        elif optimizer == 'adam':
            for i in range(niter):
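                # Pass the variable inside a list so tape.gradient returns a
                # list of gradients that zips directly with [linear] below.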
                grads = grad([linear], tf.constant(RR, dtype=tf.float32))
                opt.apply_gradients(zip(grads, [linear]))
            minic = linear.numpy().reshape(data.shape)

        #
        print('\nminimized\n')
        minfin = pm(tf.constant(minic, dtype=tf.float32)).numpy()
        dg.saveimfig("-R%d" % RR, [minic, minfin], [ic, fin], fpath + '')
        dg.save2ptfig("-R%d" % RR, [minic, minfin], [ic, fin], fpath + '', bs)
        ###
        x0 = minic
        np.save(fpath + 'ic-%d' % iR, minic)
        np.save(fpath + 'final-%d' % iR, minfin)

    exit(0)
Example No. 30
def main():

    startw = time.time()

    # Run normal flowpm to generate data
    try:
        ic, fin = np.load(fpath + 'ic.npy'), np.load(fpath + 'final.npy')
        print('Data loaded')
    except Exception as e:
        print('Exception occurred:', e)
        tfic = linear_field(nc,
                            bs,
                            ipklin,
                            batch_size=1,
                            seed=100,
                            dtype=dtype)
        tfinal_field = pm(tfic)
        ic, fin = tfic.numpy(), tfinal_field.numpy()
        np.save(fpath + 'ic', ic)
        np.save(fpath + 'final', fin)

    print('\ndata constructed\n')

    noise = np.random.normal(0, 1, nc**3).reshape(fin.shape).astype(np.float32)
    data_noised = fin + noise
    data = data_noised
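    # `anneal`, `kmesh`, and `priorwt` are assumed to be configured at module level.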

    @tf.function
    def recon_prototype(linear, Rsm=0):
        """
        """

        linear = tf.reshape(linear, data.shape)
        #loss = tf.reduce_sum(tf.square(linear - minimum))
        final_field = pm(linear)

        residual = final_field - data.astype(np.float32)
        base = residual

        if anneal:
            print("\nAdd annealing section to graph\n")
            Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
            smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
            basek = r2c3d(base, norm=nc**3)
            basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
            base = c2r3d(basek, norm=nc**3)

        chisq = tf.multiply(base, base)
        chisq = tf.reduce_sum(chisq)
        chisq = tf.multiply(chisq, 1 / nc**3, name='chisq')

        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
        prior = tf.multiply(prior, 1 / nc**3, name='prior')
        #
        loss = chisq + prior

        return loss

    @tf.function
    def val_and_grad(x, RR):
        with tf.GradientTape() as tape:
            tape.watch(x)
            loss = recon_prototype(x, RR)
        grad = tape.gradient(loss, x)
        return loss, grad

    @tf.function
    def min_lbfgs(x0, RR):
        return tfp.optimizer.lbfgs_minimize(
            lambda x: val_and_grad(x, tf.constant(RR, dtype=tf.float32)),
            initial_position=x0,
            tolerance=1e-10,
            max_iterations=200)


#
#    def make_val_and_grad_fn(value_fn, R):
#        @functools.wraps(value_fn)
#        def val_and_grad(x):
#            return tfp.math.value_and_gradient(value_fn, x)
#        return val_and_grad
#
#    @tf.function
#    def min_lbfgs(x0, RR):
#        return tfp.optimizer.lbfgs_minimize(
#            make_val_and_grad_fn(recon_prototype),
#            recon_prototype,
#            initial_position=x0,
#            tolerance=1e-10,
#            max_iterations=100)
#
##Reconstruction

    x0 = np.random.normal(0, 1, nc**3).reshape(fin.shape).astype(
        np.float32).flatten()

    RRs = [2, 1, 0.5, 0]
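    # Anneal the smoothing scale from R=2 down to the unsmoothed objective at R=0.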
    for iR, RR in enumerate(RRs):

        results = min_lbfgs(x0, RR)
        #results = sopt.minimize(fun=func, x0=x0, args = RR, jac=True, method='L-BFGS-B',
        #                        options={'maxiter':200, 'ftol': 2.220446049250313e-09, 'gtol': 1e-10,})

        print(results)
        ###
        minic = results.position.numpy().reshape(data.shape)
        print(minic.shape)
        print('\nminimized\n')
        minfin = pm(tf.constant(minic, dtype=tf.float32)).numpy()
        dg.saveimfig("-R%d" % RR, [minic, minfin], [ic, fin], fpath + '')
        dg.save2ptfig("-R%d" % RR, [minic, minfin], [ic, fin], fpath + '', bs)