Code Example #1
def recon_dm(linear, data):

    print('new graph')
    final_field = pm(linear)
    residual = final_field - data #.astype(np.float32)
    chisq = tf.multiply(residual, residual)
    chisq = tf.reduce_mean(chisq)                             
    lineark = r2c3d(linear, norm=nc**3)
    priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
    prior = tf.reduce_mean(tf.multiply(priormesh, 1/priorwt))
    loss = chisq + prior

    return loss, chisq, prior
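
The recon_dm snippet above relies on module-level globals (nc, bs, priorwt and the pm() forward model). Below is a minimal sketch, not taken from the repository, of how they might be set up, modeled on Code Example #14; the power-law pk() is only a stand-in for the interpolated linear power spectrum (ipklin) used there, the nc/bs values and N-body stages are assumed, and the top-level flowpm entry points (lpt_init, nbody, cic_paint) are those of the flowpm README:

import numpy as np
import tensorflow as tf
import flowpm
from flowpm.kernels import fftk

nc, bs = 64, 200.0                                  # cells per side and box size (assumed values)
kvec = fftk([nc, nc, nc], symmetric=False)
kmesh = sum(kk**2 for kk in kvec)**0.5              # |k| on the FFT grid, as in example #14

def pk(k):
    # Hypothetical stand-in for ipklin (an interpolated linear power spectrum).
    return 1e4 * (k + 1e-3)**-1.5

priorwt = pk(kmesh) * bs**-3                        # Fourier-space prior weight, cf. example #14
stages = np.linspace(0.1, 1.0, 5, endpoint=True)    # assumed scale-factor steps for the N-body solver

def pm(linear):
    # Forward model: first-order LPT initial conditions evolved with the flowpm N-body
    # solver and painted back onto the mesh with cloud-in-cell interpolation.
    state = flowpm.lpt_init(linear, a0=0.1, order=1)
    final_state = flowpm.nbody(state, stages, nc)
    return flowpm.cic_paint(tf.zeros_like(linear), final_state[0])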
Code Example #2
def lpt2_source(dlin_k, kvec=None, name="LPT2Source"):
    """ Generate the second order LPT source term.

  Parameters:
  -----------
  dlin_k: TODO: @modichirag add documentation

  Returns:
  --------
  source: tensor (batch_size, nc, nc, nc)
    Source term
  """
    with tf.name_scope(name):
        dlin_k = tf.convert_to_tensor(dlin_k, name="lineark")

        shape = dlin_k.get_shape()
        batch_size, nc = shape[0], shape[1:]
        if kvec is None:
            kvec = fftk(nc, symmetric=False)
        source = tf.zeros(tf.shape(dlin_k))
        D1 = [1, 2, 0]
        D2 = [2, 0, 1]

        phi_ii = []
        # diagonal terms
        lap = tf.cast(laplace_kernel(kvec), tf.complex64)

        for d in range(3):
            grad = gradient_kernel(kvec, d)
            kweight = lap * grad * grad
            phic = tf.multiply(dlin_k, kweight)
            phi_ii.append(c2r3d(phic, norm=nc[0] * nc[1] * nc[2]))

        for d in range(3):
            source = tf.add(source, tf.multiply(phi_ii[D1[d]], phi_ii[D2[d]]))

        # free memory
        phi_ii = []

        # off-diag terms
        for d in range(3):
            gradi = gradient_kernel(kvec, D1[d])
            gradj = gradient_kernel(kvec, D2[d])
            kweight = lap * gradi * gradj
            phic = tf.multiply(dlin_k, kweight)
            phi = c2r3d(phic, norm=nc[0] * nc[1] * nc[2])
            source = tf.subtract(source, tf.multiply(phi, phi))

        source = tf.multiply(source, 3.0 / 7.)
        return r2c3d(source, norm=nc[0] * nc[1] * nc[2])
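
For reference, the loops above assemble the standard second-order LPT source term (up to sign conventions for the potential):

    S^{(2)}(\mathbf{x}) \;=\; \frac{3}{7} \sum_{i<j} \Big[ \phi^{(1)}_{,ii}\,\phi^{(1)}_{,jj} - \big(\phi^{(1)}_{,ij}\big)^{2} \Big],
    \qquad \nabla^{2}\phi^{(1)} = \delta_{\rm lin},

with the diagonal products accumulated in the first loop, the squared off-diagonal terms subtracted in the second, and the result returned in Fourier space by the final r2c3d call.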
Code Example #3
File: test_utils.py Project: mrzyzhaozeyu/flowpm
def test_r2c2r():
    bs = 50
    nc = 16
    batch_size = 3
    base = 100 * np.random.randn(batch_size, nc, nc, nc).astype(np.float64)

    with tf.Session() as sess:
        cfield = r2c3d(tf.constant(base, dtype=tf.float64),
                       dtype=tf.complex128)
        rfield = c2r3d(cfield, dtype=tf.float64)
        sess.run(tf.global_variables_initializer())
        rec = sess.run(rfield)

    assert_allclose(base, rec, rtol=1e-09)
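
On eager-mode (TF 2.x) builds of flowpm the same round trip can be checked without a session. A short sketch, assuming r2c3d and c2r3d are importable from flowpm.utils with the signatures used above:

import numpy as np
import tensorflow as tf
from numpy.testing import assert_allclose
from flowpm.utils import r2c3d, c2r3d

nc, batch_size = 16, 3
base = 100 * np.random.randn(batch_size, nc, nc, nc).astype(np.float64)

cfield = r2c3d(tf.constant(base, dtype=tf.float64), dtype=tf.complex128)  # real grid -> complex FFT grid
rfield = c2r3d(cfield, dtype=tf.float64)                                  # and back to a real grid
assert_allclose(base, rfield.numpy(), rtol=1e-9)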
Code Example #4
def recon_prototype(linear):
    """
    """

    linear = tf.reshape(linear, minimum.shape)
    #loss = tf.reduce_sum(tf.square(linear - minimum))

    state = lpt_init(linear, a0=0.1, order=1)
    final_state = nbody(state, stages, FLAGS.nc)
    final_field = cic_paint(tf.zeros_like(linear), final_state[0])

    residual = final_field - data.astype(np.float32)
    base = residual
    Rsm = tf.placeholder(tf.float32, name='smoothing')
    if FLAGS.anneal:
        print("\nAdd annealing section to graph\n")
        Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
        smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
        basek = r2c3d(base, norm=nc**3)
        basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
        base = c2r3d(basek, norm=nc**3)


    #
    chisq = tf.multiply(base, base)
    chisq = tf.reduce_sum(chisq)
    chisq = tf.multiply(chisq, 1 / nc**3, name='chisq')

    #Prior
    lineark = r2c3d(linear, norm=nc**3)
    priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
    prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
    prior = tf.multiply(prior, 1 / nc**3, name='prior')
    #
    loss = chisq + prior

    return loss
Code Example #5
    def recon_prototype(linearflat):
        """
        """

        linear = tf.reshape(linearflat, data.shape)
        #

        #loss = tf.reduce_sum(tf.square(linear - minimum))

        state = lpt_init(linear, a0=0.1, order=1)
        final_state = nbody(state, stages, FLAGS.nc)
        final_field = cic_paint(tf.zeros_like(linear), final_state[0])

        residual = final_field - data.astype(np.float32)
        base = residual
        Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
        smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
        basek = r2c3d(base, norm=nc**3)
        basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
        base = c2r3d(basek, norm=nc**3)
        #
        chisq = tf.multiply(base, base)
        chisq = tf.reduce_sum(chisq)
        chisq = tf.multiply(chisq, 1 / nc**3, name='chisq')

        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
        prior = tf.multiply(prior, 1 / nc**3, name='prior')
        #
        loss = chisq + prior

        grad = tf.gradients(loss, linearflat)
        print(grad)
        return loss, grad[0]
Code Example #6
File: modelpoisson.py Project: modichirag/galference
    def recon(self, linear, data):
        """                                                                                                                
        """
        args = self.args
        print('new recon graph')
        base = self.pm(linear)

        galmean = tfp.distributions.Poisson(rate=args.plambda * (1 + base))
        logprob = -tf.reduce_mean(galmean.log_prob(data))
        #logprob = tf.multiply(logprob, 1/nc**3, name='logprob')
        #Prior
        lineark = r2c3d(linear, norm=args.nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_mean(tf.multiply(priormesh, 1 / args.priorwt))
        #prior = tf.multiply(prior, 1/nc**3, name='prior')

        loss = logprob + prior
        return loss, logprob, prior
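
The Poisson likelihood term used in recon() can be exercised on its own. A small eager-mode (TF 2.x) sketch with an assumed plambda and random stand-in fields:

import tensorflow as tf
import tensorflow_probability as tfp

plambda = 0.1                                       # assumed mean galaxy count per cell
base = tf.random.uniform((1, 8, 8, 8))              # stand-in for the modeled density field
data = tf.random.poisson((), plambda * (1 + base))  # mock observed counts

galmean = tfp.distributions.Poisson(rate=plambda * (1 + base))
logprob = -tf.reduce_mean(galmean.log_prob(data))   # the likelihood part of the loss above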
Code Example #7
def test_lpt2():
    """ Checking lpt2_source, this also checks the laplace and gradient kernels
  """
    pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc], dtype='f4')
    grid = pm.generate_uniform_particle_grid(shift=0).astype(np.float32)

    whitec = pm.generate_whitenoise(100, mode='complex', unitary=False)
    lineark = whitec.apply(lambda k, v: Planck15.get_pklin(
        sum(ki**2 for ki in k)**0.5, 0)**0.5 * v / v.BoxSize.prod()**0.5)

    # Compute lpt1 from fastpm with matching kernel order
    source = fpmops.lpt2source(lineark).c2r()

    # Same thing from tensorflow
    tfsource = tfpm.lpt2_source(
        pmutils.r2c3d(tf.expand_dims(np.array(lineark.c2r()), axis=0)))
    tfread = pmutils.c2r3d(tfsource).numpy()

    assert_allclose(source, tfread[0], atol=1e-5)
Code Example #8
def recon_dm(linear, data):
    """                                                                                                                                                   
    """
    print('new graph')
    final_field = pm(linear)
    residual = final_field - data

    chisq = tf.multiply(residual, residual)
    chisq = tf.reduce_mean(chisq)
    #     chisq = tf.multiply(chisq, 1/nc**3, name='chisq')

    #Prior
    lineark = r2c3d(linear, norm=nc**3)
    priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
    prior = tf.reduce_mean(tf.multiply(priormesh, 1 / priorwt))
    #     prior = tf.multiply(prior, 1/nc**3, name='prior')
    #
    loss = chisq + prior

    return loss, chisq, prior
Code Example #9
File: poisson_data.py Project: modichirag/galference
def pm_poisson():
    print("PM graph")
    linear = flowpm.linear_field(nc, bs, ipklin, batch_size=batch_size)
    if args.nbody:
        print('Nbody sim')
        state = lpt_init(linear, a0=a0, order=args.lpt_order)
        final_state = nbody(state, stages, nc)
    else:
        print('ZA/2LPT sim')
        final_state = lpt_init(linear, a0=af, order=args.lpt_order)
    tfinal_field = cic_paint(tf.zeros_like(linear), final_state[0])
    base = tfinal_field
    if Rsm != 0:
        basek = r2c3d(tfinal_field, norm=nc**3)
        basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
        base = c2r3d(basek, norm=nc**3)

    galmean = tfp.distributions.Poisson(rate=plambda * (1 + base))
    result = galmean.sample()
    return linear, tfinal_field, result, base
Code Example #10
def test_lpt1_64():
    """ Checking lpt1, this also checks the laplace and gradient kernels
  This variant of the test checks that it works for cubes of size 64
  """
    nc = 64
    pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc], dtype='f4')
    grid = pm.generate_uniform_particle_grid(shift=0).astype(np.float32)

    whitec = pm.generate_whitenoise(100, mode='complex', unitary=False)
    lineark = whitec.apply(lambda k, v: Planck15.get_pklin(
        sum(ki**2 for ki in k)**0.5, 0)**0.5 * v / v.BoxSize.prod()**0.5)

    # Compute lpt1 from fastpm with matching kernel order
    lpt = fpmops.lpt1(lineark, grid)

    # Same thing from tensorflow
    tfread = tfpm.lpt1(
        pmutils.r2c3d(tf.expand_dims(np.array(lineark.c2r()), axis=0)),
        grid.reshape((1, -1, 3)) * nc / bs).numpy()

    assert_allclose(lpt, tfread[0] * bs / nc, atol=5e-5)
Code Example #11
File: test_tfpm.py Project: mrzyzhaozeyu/flowpm
def test_lpt1():
    """ Checking lpt1, this also checks the laplace and gradient kernels
  """
    pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc], dtype='f4')
    grid = pm.generate_uniform_particle_grid(shift=0).astype(np.float32)

    whitec = pm.generate_whitenoise(100, mode='complex', unitary=False)
    lineark = whitec.apply(lambda k, v: Planck15.get_pklin(
        sum(ki**2 for ki in k)**0.5, 0)**0.5 * v / v.BoxSize.prod()**0.5)

    # Compute lpt1 from fastpm with matching kernel order
    lpt = fpmops.lpt1(lineark, grid)

    # Same thing from tensorflow
    with tf.Session() as sess:
        state = tfpm.lpt1(
            pmutils.r2c3d(tf.expand_dims(tf.constant(lineark.c2r()), axis=0)),
            grid.reshape((1, -1, 3)) * nc / bs)
        tfread = sess.run(state)

    assert_allclose(lpt, tfread[0] * bs / nc, atol=1e-5)
Code Example #12
File: rim_utils1d.py Project: modichirag/galference
def tfpowerspec(x, boxsize):
    nc = x.shape[-1]
    kvec = flowpm.kernels.fftk([nc, nc, nc], symmetric=False)
    kmesh = sum((kk / boxsize * nc)**2 for kk in kvec)**0.5
    kmesh = np.expand_dims(kmesh, 0).astype(np.float32)
    kbinmap = np.digitize(kmesh, kedges, right=False).astype(np.int32)  # kedges: module-level array of k-bin edges
    kbinmap[kbinmap == kbinmap.max()] = kbinmap.max() - 1
    kbinmap -= 1
    kbinmap = tf.constant(kbinmap)
    kbincount = tfp.stats.count_integers(kbinmap)
    tfkmesh = tf.constant(kmesh)
    kvals = tfp.stats.count_integers(kbinmap, weights=tfkmesh) / tf.cast(
        kbincount, tf.float32)
    kvals = tf.repeat(tf.reshape(kvals, (1, nc)), x.shape[0], 0)
    #
    pmesh = tf.square(tf.abs(r2c3d(x, norm=nc**3))) * boxsize**3
    tfpower = tfp.stats.count_integers(tf.repeat(kbinmap, pmesh.shape[0], 0),
                                       weights=pmesh,
                                       axis=[1, 2, 3])
    tfpower = tf.reshape(tfpower, (nc, x.shape[0]))
    tfpower = tf.transpose(tfpower) / tf.cast(kbincount, tf.float32)
    return kvals, tfpower
Code Example #13
def recon_prototype(data,
                    anneal=True,
                    nc=FLAGS.nc,
                    bs=FLAGS.box_size,
                    batch_size=FLAGS.batch_size,
                    a0=FLAGS.a0,
                    a=FLAGS.af,
                    nsteps=FLAGS.nsteps,
                    dtype=tf.float32):
    """
    Prototype of function computing LPT displacement.

    Returns output tensorflow and mesh tensorflow tensors
    """
    if dtype == tf.float32:
        npdtype = "float32"
        cdtype = tf.complex64
    elif dtype == tf.float64:
        npdtype = "float64"
        cdtype = tf.complex128
    print(dtype, npdtype)

    #graph = mtf.Graph()
    #mesh = mtf.Mesh(graph, "my_mesh")

    linear = tf.get_variable('linmesh',
                             shape=(1, nc, nc, nc),
                             dtype=tf.float32,
                             initializer=tf.random_normal_initializer(),
                             trainable=True)

    state = lpt_init(linear, a0=0.1, order=1)
    final_state = nbody(state, stages, FLAGS.nc)
    final_field = cic_paint(tf.zeros_like(linear), final_state[0])

    residual = final_field - data.astype(np.float32)
    base = residual
    ##Anneal
    Rsm = tf.placeholder(tf.float32, name='smoothing')
    if anneal:
        #def anneal
        Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
        smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
        basek = r2c3d(base, norm=nc**3)
        basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
        base = c2r3d(basek, norm=nc**3)

    chisq = tf.multiply(base, base)
    chisq = tf.reduce_sum(chisq)
    #chisq = tf.multiply(chisq, 1/nc**3, name='chisq')

    #Prior
    lineark = r2c3d(linear, norm=nc**3)
    priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
    prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
    #prior = tf.multiply(prior, 1/nc**3, name='prior')
    #

    loss = chisq + prior

    ##    #optimizer = tf.optimize.AdamWeightDecayOptimizer(0.01)
    ##    opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    ##
    ##    # Compute the gradients for a list of variables.
    ##    grads_and_vars = opt.compute_gradients(loss, [linear])
    ##    print("\ngradients : ", grads_and_vars)
    ##    update_ops = opt.apply_gradients(grads_and_vars)
    ##
    ##    #optimizer = tf.keras.optimizers.Adam(0.01)
    ##    #var_grads = tf.gradients([loss], [linear])
    ##
    ##
    ##    #update_ops = optimizer.apply_gradients(var_grads, linear)
    ##    #update_ops = optimizer.apply_gradients(zip(var_grads, [linear]))
    ##    #update_ops = None
    ##    #lr = tf.placeholder(tf.float32, shape=())
    ##    #update_op = mtf.assign(fieldvar, fieldvar - var_grads[0]*lr)
    ##
    return linear, final_field, loss, chisq, prior
Code Example #14
def recon_model(data, sigma=0.01**0.5, maxiter=100, anneal=False, dataovd=False, gtol=1e-5):

    #bs, nc = config['boxsize'], config['nc']
    kvec = flowpm.kernels.fftk([nc, nc, nc], symmetric=False)
    kmesh = sum(kk**2 for kk in kvec)**0.5
    priorwt = ipklin(kmesh) * bs ** -3 
    
    g = tf.Graph()

    with g.as_default():
        
        initlin = tf.placeholder(tf.float32, data.shape, name='initlin')
        linear = tf.get_variable('linmesh', shape=(nc, nc, nc), 
                             initializer=tf.random_normal_initializer(), trainable=True)
        initlin_op = linear.assign(initlin, name='initlin_op')
        #PM
        icstate = tfpm.lptinit(linear, FLAGS.a0, name='icstate')
        fnstate = tfpm.nbody(icstate, stages, nc, name='fnstate')
        final = tf.zeros_like(linear)
        final = cic_paint(final, fnstate[0], name='final')
        if dataovd:
            print('\nConverting final density to overdensity because the data is an overdensity\n')
            fmean = tf.reduce_mean(final)
            final = tf.multiply(final, 1/ fmean)
            final = final - 1
        #
        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1/priorwt))
        prior = tf.multiply(prior, 1/nc**3, name='prior')

        likelihood = tf.subtract(final, data)
        likelihood = tf.multiply(likelihood, 1/sigma)
        #galmean = tfp.distributions.Poisson(rate = plambda * (1 + finalfield))
        #logprob = galmean.log_prob(data)

        ##Anneal
        Rsm = tf.placeholder(tf.float32, name='smoothing')
        if anneal:
            print('\nAdding annealing part to graph\n')
            Rsm = tf.multiply(Rsm, bs/nc)
            Rsmsq = tf.multiply(Rsm, Rsm)
            smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
            likelihood = tf.squeeze(likelihood)
            likelihoodk = r2c3d(likelihood, norm=nc**3)
            likelihoodk = tf.multiply(likelihoodk, tf.cast(smwts, tf.complex64))
            residual = c2r3d(likelihoodk, norm=nc**3)
        else:
            residual = tf.identity(likelihood)
            
        chisq = tf.multiply(residual, residual)
        chisq = tf.reduce_sum(chisq)
        chisq = tf.multiply(chisq, 1/nc**3, name='chisq')

        loss = tf.add(chisq, prior, name='loss')
        
        #optimizer = ScipyOptimizerInterface(loss, var_list=[linear], method='L-BFGS-B', 
        #                                    options={'maxiter': maxiter, 'gtol':gtol})

        # NOTE: tf.optimize.AdamWeightDecayOptimizer is not a standard TF 1.x API;
        # a plain Adam optimizer is substituted here so the graph builds.
        optimizer = tf.train.AdamOptimizer(0.01)
        var_grads = tf.gradients([loss], [linear])

        update_ops = optimizer.apply_gradients(list(zip(var_grads, [linear])))

        
        tf.add_to_collection('inits', [initlin_op, initlin])
        #tf.add_to_collection('opt', optimizer)
        tf.add_to_collection('opt', update_ops)
        tf.add_to_collection('diagnostics', [prior, chisq, loss])
        tf.add_to_collection('reconpm', [linear, final, fnstate])
        tf.add_to_collection('data', data)
    return g
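
A hedged sketch, not part of the original file, of how the graph returned by recon_model() might be driven: build the variable initializer inside the graph, seed the linear mesh through the 'inits' collection, then alternate optimizer steps while annealing the smoothing scale fed to the 'smoothing' placeholder (data and the other module-level globals are assumed to exist as in the snippet above):

import numpy as np
import tensorflow as tf

g = recon_model(data, anneal=True)
with g.as_default():
    init_op = tf.global_variables_initializer()
initlin_op, initlin = g.get_collection('inits')[0]
update_ops = g.get_collection('opt')[0]
prior, chisq, loss = g.get_collection('diagnostics')[0]
smoothing = g.get_tensor_by_name('smoothing:0')

with tf.Session(graph=g) as sess:
    sess.run(init_op)
    sess.run(initlin_op, {initlin: np.random.normal(size=data.shape).astype(np.float32)})
    for R in [4.0, 2.0, 1.0, 0.0]:                  # assumed annealing schedule, in units of the cell size
        for _ in range(100):
            sess.run(update_ops, {smoothing: R})
        print(R, sess.run([chisq, prior, loss], {smoothing: R}))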