Example #1
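# Setup assumed by this example (inferred from usage; not shown in the original
# snippet). `Planck15` is taken to be a cosmology object exposing `get_pklin`,
# and `bs`/`nc` are module-level box size and mesh size.
import numpy as np
import tensorflow as tf
from numpy.testing import assert_allclose

from pmesh.pm import ParticleMesh          # reference PM mesh
from fastpm.core import Solver, leapfrog   # reference FastPM solver

import flowpm.kernels
from flowpm import tfpm
from flowpm import utils as pmutils
from flowpm.utils import cic_paint, r2c3d, c2r3d

bs, nc = 100., 16   # hypothetical box size [Mpc/h] and mesh resolution
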
def test_nbody():
    """Checking end to end nbody."""
    a0 = 0.1

    pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc], dtype='f4')
    grid = pm.generate_uniform_particle_grid(shift=0).astype(np.float32)
    solver = Solver(pm, Planck15, B=1)
    stages = np.linspace(0.1, 1.0, 10, endpoint=True)

    # Generate initial state with fastpm
    whitec = pm.generate_whitenoise(100, mode='complex', unitary=False)
    lineark = whitec.apply(lambda k, v: Planck15.get_pklin(
        sum(ki**2 for ki in k)**0.5, 0)**0.5 * v / v.BoxSize.prod()**0.5)
    statelpt = solver.lpt(lineark, grid, a0, order=1)
    finalstate = solver.nbody(statelpt, leapfrog(stages))
    final_cube = pm.paint(finalstate.X)

    # Same thing with flowpm
    tlinear = tf.expand_dims(np.array(lineark.c2r()), 0)
    state = tfpm.lpt_init(tlinear, a0, order=1)
    state = tfpm.nbody(state, stages, nc)
    tfread = pmutils.cic_paint(tf.zeros_like(tlinear), state[0]).numpy()

    assert_allclose(final_cube, tfread[0], atol=1.2)
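

# The function below builds a TF1-style graph (tf.placeholder, tf.get_variable,
# collections), unlike the eager test above, so it assumes a TF1 or
# tf.compat.v1 environment. It also relies on module-level globals that this
# snippet does not define; a hypothetical stand-in for them:
#
#   stages = np.linspace(0.1, 1.0, 5, endpoint=True)      # leapfrog time steps
#   klin, plin = np.loadtxt('linear_pk.txt', unpack=True)  # tabulated linear P(k)
#   ipklin = InterpolatedUnivariateSpline(klin, plin)      # from scipy.interpolate
#   FLAGS.a0                                               # initial scale factor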
def recon_model(data, sigma=0.01**0.5, maxiter=100, anneal=False, dataovd=False, gtol=1e-5):
    """Build a TF1 graph that reconstructs the initial linear mesh from `data` by
    minimizing a chi-square data term plus a Gaussian prior on the linear modes.
    (`maxiter` and `gtol` are only used by the commented-out scipy optimizer.)"""

    #bs, nc = config['boxsize'], config['nc']
    kvec = flowpm.kernels.fftk([nc, nc, nc], symmetric=False)
    kmesh = sum(kk**2 for kk in kvec)**0.5
    priorwt = ipklin(kmesh) * bs ** -3 
    
    g = tf.Graph()

    with g.as_default():
        
        initlin = tf.placeholder(tf.float32, data.shape, name='initlin')
        linear = tf.get_variable('linmesh', shape=(nc, nc, nc),
                                 initializer=tf.random_normal_initializer(), trainable=True)
        initlin_op = linear.assign(initlin, name='initlin_op')
        #PM
        icstate = tfpm.lptinit(linear, FLAGS.a0, name='icstate')
        fnstate = tfpm.nbody(icstate, stages, nc, name='fnstate')
        final = tf.zeros_like(linear)
        final = cic_paint(final, fnstate[0], name='final')
        if dataovd:
            print('\nConverting the final density to overdensity, since the data is an overdensity field\n')
            fmean = tf.reduce_mean(final)
            final = tf.multiply(final, 1 / fmean)
            final = final - 1
        #
        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1/priorwt))
        prior = tf.multiply(prior, 1/nc**3, name='prior')

        # Noise-weighted residual between the forward-modelled field and the data
        likelihood = tf.subtract(final, data)
        likelihood = tf.multiply(likelihood, 1/sigma)
        #galmean = tfp.distributions.Poisson(rate = plambda * (1 + finalfield))
        #logprob = galmean.log_prob(data)

        ##Anneal
        Rsm = tf.placeholder(tf.float32, name='smoothing')
        if anneal:
            print('\nAdding annealing part to graph\n')
            Rsm = tf.multiply(Rsm, bs/nc)
            Rsmsq = tf.multiply(Rsm, Rsm)
            smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
            likelihood = tf.squeeze(likelihood)
            likelihoodk = r2c3d(likelihood, norm=nc**3)
            likelihoodk = tf.multiply(likelihoodk, tf.cast(smwts, tf.complex64))
            residual = c2r3d(likelihoodk, norm=nc**3)
        else:
            residual = tf.identity(likelihood)
            
        chisq = tf.multiply(residual, residual)
        chisq = tf.reduce_sum(chisq)
        chisq = tf.multiply(chisq, 1/nc**3, name='chisq')

        loss = tf.add(chisq, prior, name='loss')
        
        #optimizer = ScipyOptimizerInterface(loss, var_list=[linear], method='L-BFGS-B', 
        #                                    options={'maxiter': maxiter, 'gtol':gtol})

        # Note: standard Adam (tf.train.AdamOptimizer) is used here as a stand-in;
        # the original snippet referenced a weight-decay Adam variant.
        optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
        var_grads = tf.gradients([loss], [linear])

        update_ops = optimizer.apply_gradients(list(zip(var_grads, [linear])))

        
        tf.add_to_collection('inits', [initlin_op, initlin])
        #tf.add_to_collection('opt', optimizer)
        tf.add_to_collection('opt', update_ops)
        tf.add_to_collection('diagnostics', [prior, chisq, loss])
        tf.add_to_collection('reconpm', [linear, final, fnstate])
        tf.add_to_collection('data', data)
    return g
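

# --- Hypothetical usage sketch (not part of the original example) ---
# Shows one way to drive the returned graph: seed the 'linmesh' variable with an
# initial guess, run the Adam update op while feeding the 'smoothing'
# placeholder, and read diagnostics from the registered collections. The names
# `data_np`, `init_guess`, `niter` and `Rsm` are illustrative.
def run_recon(data_np, init_guess, niter=200, Rsm=4.0):
    g = recon_model(data_np, anneal=True)
    with g.as_default():
        init_op = tf.global_variables_initializer()
    initlin_op, initlin = g.get_collection('inits')[0]
    update_op = g.get_collection('opt')[0]
    prior, chisq, loss = g.get_collection('diagnostics')[0]
    linear_t, final_t, fnstate_t = g.get_collection('reconpm')[0]
    smoothing = g.get_tensor_by_name('smoothing:0')
    with tf.Session(graph=g) as sess:
        sess.run(init_op)
        sess.run(initlin_op, feed_dict={initlin: init_guess})
        for it in range(niter):
            _, l, c, p = sess.run([update_op, loss, chisq, prior],
                                  feed_dict={smoothing: Rsm})
            if it % 50 == 0:
                print(it, l, c, p)
        return sess.run([linear_t, final_t])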