Example #1
def lpt2_source(dlin_k, kvec=None, name="LPT2Source"):
    """ Generate the second order LPT source term.

  Parameters:
  -----------
  dlin_k: tensor (batch_size, nc, nc, nc), complex
    Linear density field in Fourier space

  Returns:
  --------
  source: tensor (batch_size, nc, nc, nc), complex
    Second order source term, in Fourier space
  """
    with tf.name_scope(name):
        dlin_k = tf.convert_to_tensor(dlin_k, name="lineark")

        shape = dlin_k.get_shape()
        batch_size, nc = shape[0], shape[1:]
        if kvec is None:
            kvec = fftk(nc, symmetric=False)
        source = tf.zeros(tf.shape(dlin_k))
        D1 = [1, 2, 0]
        D2 = [2, 0, 1]

        phi_ii = []
        # diagonal terms
        lap = tf.cast(laplace_kernel(kvec), tf.complex64)

        for d in range(3):
            grad = gradient_kernel(kvec, d)
            kweight = lap * grad * grad
            phic = tf.multiply(dlin_k, kweight)
            phi_ii.append(c2r3d(phic, norm=nc[0] * nc[1] * nc[2]))

        for d in range(3):
            source = tf.add(source, tf.multiply(phi_ii[D1[d]], phi_ii[D2[d]]))

        # free memory
        phi_ii = []

        # off-diag terms
        for d in range(3):
            gradi = gradient_kernel(kvec, D1[d])
            gradj = gradient_kernel(kvec, D2[d])
            kweight = lap * gradi * gradj
            phic = tf.multiply(dlin_k, kweight)
            phi = c2r3d(phic, norm=nc[0] * nc[1] * nc[2])
            source = tf.subtract(source, tf.multiply(phi, phi))

        source = tf.multiply(source, 3.0 / 7.)
        return r2c3d(source, norm=nc[0] * nc[1] * nc[2])
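A minimal usage sketch, assuming r2c3d and c2r3d from the same module are in scope; the random field below only stands in for a real linear density field:

import numpy as np
import tensorflow as tf

nc = [16, 16, 16]
norm = nc[0] * nc[1] * nc[2]
# Stand-in linear field (batch of 1); any real-space field of this shape works.
linear = tf.constant(np.random.randn(1, *nc).astype(np.float32))
dlin_k = r2c3d(linear, norm=norm)       # to Fourier space
source_k = lpt2_source(dlin_k)          # Fourier-space 2LPT source
source = c2r3d(source_k, norm=norm)     # back to real space if needed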
Example #2
def whitenoise_to_linear(nc,
                         boxsize,
                         whitec,
                         pk,
                         kvec=None,
                         batch_size=1,
                         seed=None,
                         dtype=tf.float32,
                         name="LinearField"):
  """Generates a linear field with a given linear power spectrum and whitenoise realization
  """
  with tf.name_scope(name):
    # Transform nc to a list if necessary
    if isinstance(nc, int):
      nc = [nc, nc, nc]
    if isinstance(boxsize, int) or isinstance(boxsize, float):
      boxsize = [boxsize, boxsize, boxsize]

    if kvec is None:
      kvec = fftk(nc, symmetric=False)
    kmesh = sum((kk / boxsize[i] * nc[i])**2 for i, kk in enumerate(kvec))**0.5
    pkmesh = pk(kmesh)

    lineark = tf.multiply(whitec, (pkmesh /
                                   (boxsize[0] * boxsize[1] * boxsize[2]))**0.5)
    linear = c2r3d(lineark, norm=nc[0] * nc[1] * nc[2], name=name, dtype=dtype)
    return linear
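A usage sketch, with white_noise the helper used by linear_field further down, and ipklin standing for a linear power-spectrum interpolator as in the other examples here:

nc, boxsize = 64, 200.
whitec = white_noise([nc, nc, nc], batch_size=1, type='complex')
linear = whitenoise_to_linear(nc, boxsize, whitec, ipklin)  # (1, nc, nc, nc)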
Example #3
    def recon_prototype(linear, Rsm=0):
        """
        """

        linear = tf.reshape(linear, data.shape)
        #loss = tf.reduce_sum(tf.square(linear - minimum))
        final_field = pm(linear)
        base = final_field

        if anneal:
            print('\nAdd annealing graph\n')
            Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
            smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
            basek = r2c3d(base, norm=nc**3)
            basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
            base = c2r3d(basek, norm=nc**3)

        galmean = tfp.distributions.Poisson(rate=plambda * (1 + base))
        sample = galmean.sample()
        logprob = -tf.reduce_sum(galmean.log_prob(data))
        logprob = tf.multiply(logprob, 1 / nc**3, name='logprob')

        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
        prior = tf.multiply(prior, 1 / nc**3, name='prior')
        #
        loss = logprob + prior

        return loss
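The annealing block that recurs in these reconstruction losses is just a Gaussian low-pass filter of width Rsm grid cells, applied in Fourier space. The same pattern as a standalone sketch, assuming kmesh, r2c3d and c2r3d as above:

def smooth(field, kmesh, Rsm, bs, nc):
    # Damp mode k by exp(-k^2 R^2), with R = Rsm * bs / nc (cells -> box units)
    Rsmsq = (Rsm * bs / nc)**2
    smwts = tf.exp(-kmesh**2 * Rsmsq)
    fieldk = r2c3d(field, norm=nc**3)
    fieldk = fieldk * tf.cast(smwts, tf.complex64)
    return c2r3d(fieldk, norm=nc**3)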
Example #4
    def recon_prototype(linearflat):
        """
        """
        linear = tf.reshape(linearflat, data.shape)
        #

        #loss = tf.reduce_sum(tf.square(linear - minimum))
        state = lpt_init(linear, a0=0.1, order=1)
        final_state = nbody(state, stages, FLAGS.nc)
        final_field = cic_paint(tf.zeros_like(linear), final_state[0])
        #final_field = pmgraph(linear)
        base = final_field

        if FLAGS.anneal:
            Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
            smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
            basek = r2c3d(base, norm=nc**3)
            basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
            base = c2r3d(basek, norm=nc**3)

        galmean = tfp.distributions.Poisson(rate=plambda * (1 + base))
        logprob = -tf.reduce_sum(galmean.log_prob(data))
        #logprob = tf.multiply(logprob, 1/nc**3, name='logprob')

        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
        #prior = tf.multiply(prior, 1/nc**3, name='prior')
        #
        loss = logprob + prior

        grad = tf.gradients(loss, linearflat)
        print(grad)
        return loss, grad[0]
Example #5
    def recon_prototype(linear, Rsm):
        """
        """

        linear = tf.reshape(linear, data.shape)
        #loss = tf.reduce_sum(tf.square(linear - minimum))
        final_field = pm(linear)

        residual = final_field - data.astype(np.float32)
        base = residual

        if anneal:
            print("\nAdd annealing section to graph\n")
            Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
            smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
            basek = r2c3d(base, norm=nc**3)
            basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
            base = c2r3d(basek, norm=nc**3)

        chisq = tf.multiply(base, base)
        chisq = tf.reduce_sum(chisq)
        chisq = tf.multiply(chisq, 1 / nc**3, name='chisq')

        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
        prior = tf.multiply(prior, 1 / nc**3, name='prior')
        #
        loss = chisq + prior

        return loss
Example #6
def colorps(x, ps, kbinmapflat):
    nc = x.shape[-1]
    pmesh = tf.transpose(tf.gather(tf.transpose(ps), kbinmapflat))
    pmesh = tf.reshape(pmesh, x.shape)
    xnew = c2r3d(r2c3d(x, norm=nc**3) * tf.cast(pmesh**0.5, tf.complex64),
                 norm=nc**3)
    return xnew
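A hedged sketch of how the inputs might be built: the binning below is an assumption, ps must have shape (batch, nbins) to survive the transpose/gather, and pk, kmesh, nc are taken from the surrounding context:

num_bins = 32
kedges = np.linspace(0, kmesh.max(), num_bins + 1)
# Per-cell |k| bin index, flattened to match tf.gather inside colorps
kbinmapflat = np.clip(np.digitize(kmesh.flatten(), kedges) - 1, 0, num_bins - 1)
ps = tf.constant(pk(0.5 * (kedges[1:] + kedges[:-1]))[np.newaxis, :], tf.float32)

white = tf.constant(np.random.randn(1, nc, nc, nc).astype(np.float32))
colored = colorps(white, ps, kbinmapflat)  # white noise colored to P(k)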
Example #7
    def loss_fn(self, linear, data, Rsm=tf.constant(0.)):
        """
        """
        bs, nc = self.bs, self.nc
        linear = tf.reshape(linear, data.shape)
        final_field = self.forward(linear)
        base = final_field

        if self.anneal:
            print('\nAdd annealing graph\n')
            Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
            smwts = tf.exp(tf.multiply(-self.kmesh**2, Rsmsq))
            basek = r2c3d(base, norm=nc**3)
            basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
            base = c2r3d(basek, norm=nc**3)

        galmean = tfp.distributions.Poisson(rate=self.plambda * (1 + base))
        sample = galmean.sample()
        logprob = -tf.reduce_sum(galmean.log_prob(data))
        #logprob = tf.multiply(logprob, 1/nc**3, name='logprob')

        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1 / self.priorwt))
        #prior = tf.multiply(prior, 1/nc**3, name='prior')
        #
        loss = logprob + prior

        return loss
Example #8
    def reconstruct_loss(self, linear, data, bias, errormesh, Rsm=tf.constant(0.), useprior=True):
        """
        """
        args = self.args
        bs, nc = args.bs, args.nc
        kmesh = args.kmesh
        priorwt = args.priorwt

        linear = tf.reshape(linear, data.shape)
        bmodel = self.biasfield(linear, bias)
        residual = bmodel - data
        base = residual

        print("\nAdd annealing section to graph\n")
        Rsmsq = tf.multiply(Rsm*bs/nc, Rsm*bs/nc)
        smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
        basek = r2c3d(base, norm=nc**3)
        basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
        base = c2r3d(basek, norm=nc**3)   
        
        resk = r2c3d(base, norm=nc**3)
        reskmesh = tf.square(tf.cast(tf.abs(resk), tf.float32))
        chisq = tf.reduce_mean(tf.multiply(reskmesh, 1/errormesh))
        chisq = chisq * bs**3/nc**1.5

        if useprior:
            lineark = r2c3d(linear, norm=nc**3)
            priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
            prior = tf.reduce_mean(tf.multiply(priormesh, 1/priorwt))
            prior = prior * bs**3/nc**1.5
        else:
            prior = 0.
        loss = chisq + prior

        return loss #, chisq, prior
Example #9
def lpt1(dlin_k, pos, kvec=None, name="LPT1"):
    """ Run first order LPT on a linear density field, returning the displacement of
      particles read out at pos. The result has the same dtype as pos.

  Parameters:
  -----------
  dlin_k: tensor (batch_size, nc, nc, nc), complex
    Linear density field in Fourier space

  pos: tensor (batch_size, npart, 3)
    Positions at which the displacement is read out, in grid units

  Returns:
  --------
  displacement: tensor (batch_size, npart, 3)
    Displacement field
  """
    with tf.name_scope(name):
        dlin_k = tf.convert_to_tensor(dlin_k, name="lineark")
        pos = tf.convert_to_tensor(pos, name="pos")

        shape = dlin_k.get_shape()
        batch_size, nc = shape[0], shape[1:]
        if kvec is None:
            kvec = fftk(nc, symmetric=False)

        lap = tf.cast(laplace_kernel(kvec), tf.complex64)

        displacement = []
        for d in range(3):
            kweight = gradient_kernel(kvec, d) * lap
            dispc = tf.multiply(dlin_k, kweight)
            disp = c2r3d(dispc, norm=nc[0] * nc[1] * nc[2])
            displacement.append(cic_readout(disp, pos))
        displacement = tf.stack(displacement, axis=2)
        return displacement
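Usage sketch: positions are in grid units (compare the grid construction in the standardrecon example below), and dlin_k is a Fourier-space linear field as produced by r2c3d of a linear_field realization:

nc = 16
grid = np.indices((nc, nc, nc)).reshape(3, -1).T.astype(np.float32)
pos = tf.constant(grid[np.newaxis])   # (1, npart, 3) in grid units
dx = lpt1(dlin_k, pos)                # (1, npart, 3) Zel'dovich displacements
new_pos = pos + dx                    # displaced particle positions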
Example #10
def standardrecon(base, pos, bias, R):

    #base = base.astype(np.float32)
    #pos = pos.astype(base.dtype)
    smwts = tf.exp(tf.multiply(-kmesh**2, R**2))
    basek = utils.r2c3d(base, norm=nc**3)
    basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
    basesm = utils.c2r3d(basek, norm=nc**3)

    grid = bs / nc * np.indices((nc, nc, nc)).reshape(3, -1).T.astype(np.float32)
    grid = tf.constant(np.expand_dims(grid, 0))
    grid = grid * nc / bs
    pos = pos * nc / bs
        
    mesh = basesm #tf.constant(basesm.astype(np.float32))
    meshk = utils.r2c3d(mesh, norm=nc**3)
    
    DX = tfpm.lpt1(meshk, pos, kvec=kvec)
    DX = tf.multiply(DX, -1/bias)
    pos = tf.add(pos, DX)
    displaced = tf.zeros_like(mesh)
    displaced = utils.cic_paint(displaced, pos, name='displaced')
    
    DXrandom = tfpm.lpt1(meshk, grid, kvec=kvec)
    DXrandom = tf.multiply(DXrandom, -1/bias)
    posrandom = tf.add(grid, DXrandom)
    random = tf.zeros_like(mesh)
    random = utils.cic_paint(random, posrandom, name='random')
    return displaced, random
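The two painted meshes are usually combined as displaced minus shifted randoms to form the reconstructed field; the bias and smoothing values below are illustrative:

displaced, random = standardrecon(base, pos, bias=2.0, R=10.)
recon = displaced - random  # standard-reconstruction density estimate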
Example #11
    def loss_fn(self, linear, data, Rsm=tf.constant(0.)):
        """
        """
        bs, nc = self.bs, self.nc
        linear = tf.reshape(linear, data.shape)
        final_field = self.forward(linear)
        residual = final_field - data
        base = residual

        if self.anneal:
            print("\nAdd annealing section to graph\n")
            Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
            smwts = tf.exp(tf.multiply(-self.kmesh**2, Rsmsq))
            basek = r2c3d(base, norm=nc**3)
            basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
            base = c2r3d(basek, norm=nc**3)

        chisq = tf.multiply(base, base)
        chisq = tf.reduce_sum(chisq)
        #chisq = tf.multiply(chisq, 1/nc**3, name='chisq')

        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1 / self.priorwt))
        #prior = tf.multiply(prior, 1/nc**3, name='prior')
        #
        loss = chisq + prior
        return loss
Example #12
def linear_field(nc,
                 boxsize,
                 pk,
                 kvec=None,
                 batch_size=1,
                 seed=None,
                 dtype=tf.float32,
                 name="LinearField"):
    """Generates a linear field with a given linear power spectrum.
  
  Parameters:
  -----------
  nc: int, or list of ints
    Number of cells in the field. If a list is provided, number of cells per
    dimension.
  boxsize: float, or list of floats
    Physical size of the cube, in Mpc/h.
  pk: interpolator
    Power spectrum to use for the field
  kvec: array
    k_vector corresponding to the cube, optional
  batch_size: int
    Size of batches
  seed: int
    Seed to initialize the gaussian random field
  dtype: tf.dtype
    Type of the sampled field, e.g. tf.float32 or tf.float64
  Returns:
  --------
  linfield: tensor (batch_size, nc, nc, nc)
    Realization of the linear field with requested power spectrum
  """
    with tf.name_scope(name):
        # Transform nc to a list if necessary
        if isinstance(nc, int):
            nc = [nc, nc, nc]
        if isinstance(boxsize, int) or isinstance(boxsize, float):
            boxsize = [boxsize, boxsize, boxsize]

        if kvec is None:
            kvec = fftk(nc, symmetric=False)
        kmesh = sum(
            (kk / boxsize[i] * nc[i])**2 for i, kk in enumerate(kvec))**0.5
        pkmesh = pk(kmesh)

        whitec = white_noise(nc,
                             batch_size=batch_size,
                             seed=seed,
                             type='complex')
        lineark = tf.multiply(
            whitec, (pkmesh / (boxsize[0] * boxsize[1] * boxsize[2]))**0.5)
        linear = c2r3d(lineark,
                       norm=nc[0] * nc[1] * nc[2],
                       name=name,
                       dtype=dtype)
        return linear
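Typical call, with ipklin a linear power-spectrum interpolator as used in the other examples:

linear = linear_field(64, 200., ipklin, batch_size=1, seed=100)  # (1, 64, 64, 64)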
Example #13
    def recon_prototype(x0=None):
        """
        """       
#        linear = tf.get_variable('linmesh', shape=(1, nc, nc, nc), dtype=tf.float32,
#                             initializer=tf.random_normal_initializer(), trainable=True)
        if x0 is None:
            linear = tf.get_variable('linmesh', shape=(1, nc, nc, nc), dtype=tf.float32,
                             initializer=tf.random_normal_initializer(), trainable=True)
        else:
            linear = tf.get_variable('linmesh', shape=(1, nc, nc, nc), dtype=tf.float32,
                             initializer=tf.constant_initializer(x0), trainable=True)

        state = lpt_init(linear, a0=0.1, order=1)
        final_state = nbody(state,  stages, FLAGS.nc)
        final_field = cic_paint(tf.zeros_like(linear), final_state[0])
        base = final_field

        if FLAGS.anneal:
            print('\nAdd annealing graph\n')
            Rsmsq = tf.multiply(Rsm*bs/nc, Rsm*bs/nc)
            smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
            basek = r2c3d(base, norm=nc**3)
            basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
            base = c2r3d(basek, norm=nc**3)   

        galmean = tfp.distributions.Poisson(rate = plambda * (1 + base))
        sample = galmean.sample()
        logprob = -tf.reduce_sum(galmean.log_prob(data))
        #logprob = tf.multiply(logprob, 1/nc**3, name='logprob')
        
        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1/priorwt))
        #prior = tf.multiply(prior, 1/nc**3, name='prior')
        #
        loss = logprob + prior

        #opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
        opt = tf.train.AdamOptimizer(learning_rate=FLAGS.lr)

        #step = tf.Variable(0, trainable=False)
        #schedule = tf.optimizers.schedules.PiecewiseConstantDecay(
        #    [10000, 15000], [1e-0, 1e-1, 1e-2])
        ## lr and wd can be a function or a tensor
        #lr = 1e-1 * schedule(step)
        #wd = lambda: 1e-4 * schedule(step)
        #opt = tfa.optimizers.AdamW(learning_rate=FLAGS.lr, weight_decay=1e-1)

        # Compute the gradients for a list of variables.
        grads_and_vars = opt.compute_gradients(loss, [linear])
        print("\ngradients : ", grads_and_vars)
        update_ops = opt.apply_gradients(grads_and_vars)

        return linear, sample, update_ops, loss, logprob, prior
Example #14
def test_r2c2r():
    bs = 50
    nc = 16
    batch_size = 3
    base = 100 * np.random.randn(batch_size, nc, nc, nc).astype(np.float64)

    cfield = r2c3d(tf.constant(base, dtype=tf.float64), dtype=tf.complex128)
    rfield = c2r3d(cfield, dtype=tf.float64)
    rec = rfield.numpy()

    assert_allclose(base, rec, rtol=1e-09)
Example #15
def test_r2c2r():
    bs = 50
    nc = 16
    batch_size = 3
    base = 100 * np.random.randn(batch_size, nc, nc, nc).astype(np.float64)

    with tf.Session() as sess:
        cfield = r2c3d(tf.constant(base, dtype=tf.float64),
                       dtype=tf.complex128)
        rfield = c2r3d(cfield, dtype=tf.float64)
        sess.run(tf.global_variables_initializer())
        rec = sess.run(rfield)

    assert_allclose(base, rec, rtol=1e-09)
Example #16
def apply_pgd(x, delta_k, alpha, kl, ks, kvec=None, name="ApplyPGD"):
    """
  Estimate the short range force on the particles given a state.

  Parameters:
  -----------
  x: tensor
    Particle positions, tensor of shape (batch_size, npart, 3)

  delta_k: tensor
    Density field in Fourier space
    
  alpha: float
    Free parameter. Factor of proportionality between the displacement and the Particle-mesh force.

  kl: float
    Long range scale parameter
    
  ks: float
    Short range scale parameter
  """
    with tf.name_scope(name):
        x = tf.convert_to_tensor(x, name="pos")
        delta_k = tf.convert_to_tensor(delta_k, name="delta_k")

        shape = delta_k.get_shape()
        nc = shape[1:]

        if kvec is None:
            kvec = fftk(nc, symmetric=False)

        ndim = 3
        norm = nc[0] * nc[1] * nc[2]

        lap = tf.cast(laplace_kernel(kvec), tf.complex64)
        PGD_range = tf.cast(PGD_kernel(kvec, kl, ks), tf.complex64)
        kweight = lap * PGD_range
        pot_k = tf.multiply(delta_k, kweight)

        f = []
        for d in range(ndim):
            force_dc = tf.multiply(pot_k, gradient_kernel(kvec, d))
            forced = c2r3d(force_dc, norm=norm)
            force = cic_readout(forced, x)
            f.append(force)

        f = tf.stack(f, axis=2)
        f = tf.multiply(f, alpha)
        return f
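A hedged sketch of applying the PGD correction to the particles of an n-body state; the alpha, kl, ks values are illustrative fit parameters, not recommendations:

final_field = cic_paint(tf.zeros_like(linear), final_state[0])
delta_k = r2c3d(final_field, norm=nc**3)
dpos = apply_pgd(final_state[0], delta_k, alpha=0.3, kl=0.3, ks=12.)
positions = final_state[0] + dpos  # PGD-corrected positions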
Example #17
def test_lpt2():
    """ Checking lpt2_source, this also checks the laplace and gradient kernels
  """
    pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc], dtype='f4')
    grid = pm.generate_uniform_particle_grid(shift=0).astype(np.float32)

    whitec = pm.generate_whitenoise(100, mode='complex', unitary=False)
    lineark = whitec.apply(lambda k, v: Planck15.get_pklin(
        sum(ki**2 for ki in k)**0.5, 0)**0.5 * v / v.BoxSize.prod()**0.5)

    # Compute the 2LPT source from fastpm with matching kernel order
    source = fpmops.lpt2source(lineark).c2r()

    # Same thing from tensorflow
    tfsource = tfpm.lpt2_source(
        pmutils.r2c3d(tf.expand_dims(np.array(lineark.c2r()), axis=0)))
    tfread = pmutils.c2r3d(tfsource).numpy()

    assert_allclose(source, tfread[0], atol=1e-5)
Example #18
def pm_poisson():
    print("PM graph")
    linear = flowpm.linear_field(nc, bs, ipklin, batch_size=batch_size)
    if args.nbody:
        print('Nbody sim')
        state = lpt_init(linear, a0=a0, order=args.lpt_order)
        final_state = nbody(state,  stages, nc)
    else:
        print('ZA/2LPT sim')
        final_state = lpt_init(linear, a0=af, order=args.lpt_order)
    tfinal_field = cic_paint(tf.zeros_like(linear), final_state[0])
    base = tfinal_field
    if Rsm != 0:
        basek = r2c3d(tfinal_field, norm=nc**3)
        basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
        base = c2r3d(basek, norm=nc**3)

    galmean = tfp.distributions.Poisson(rate=plambda * (1 + base))
    result = galmean.sample()
    return linear, tfinal_field, result, base
Example #19
def recon_prototype(linear):
    """
    """

    linear = tf.reshape(linear, minimum.shape)
    #loss = tf.reduce_sum(tf.square(linear - minimum))

    state = lpt_init(linear, a0=0.1, order=1)
    final_state = nbody(state, stages, FLAGS.nc)
    final_field = cic_paint(tf.zeros_like(linear), final_state[0])

    residual = final_field - data.astype(np.float32)
    base = residual
    Rsm = tf.placeholder(tf.float32, name='smoothing')
    if FLAGS.anneal:
        print("\nAdd annealing section to graph\n")
        Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
        smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
        basek = r2c3d(base, norm=nc**3)
        basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
        base = c2r3d(basek, norm=nc**3)


    #
    chisq = tf.multiply(base, base)
    chisq = tf.reduce_sum(chisq)
    chisq = tf.multiply(chisq, 1 / nc**3, name='chisq')

    #Prior
    lineark = r2c3d(linear, norm=nc**3)
    priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
    prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
    prior = tf.multiply(prior, 1 / nc**3, name='prior')
    #
    loss = chisq + prior

    return loss
Example #20
def apply_longrange(x,
                    delta_k,
                    split=0,
                    factor=1,
                    kvec=None,
                    name="ApplyLongrange"):
    """ like long range, but x is a list of positions
  TODO: Better documentation, also better name?
  """
    # use the four-point kernel to suppress artificial growth of noise-like terms
    with tf.name_scope(name):
        x = tf.convert_to_tensor(x, name="pos")
        delta_k = tf.convert_to_tensor(delta_k, name="delta_k")

        shape = delta_k.get_shape()
        nc = shape[1:]

        if kvec is None:
            kvec = fftk(nc, symmetric=False)

        ndim = 3
        norm = nc[0] * nc[1] * nc[2]
        lap = tf.cast(laplace_kernel(kvec), tf.complex64)
        fknlrange = longrange_kernel(kvec, split)
        kweight = lap * fknlrange
        pot_k = tf.multiply(delta_k, kweight)

        f = []
        for d in range(ndim):
            force_dc = tf.multiply(pot_k, gradient_kernel(kvec, d))
            forced = c2r3d(force_dc, norm=norm)
            force = cic_readout(forced, x)
            f.append(force)

        f = tf.stack(f, axis=2)
        f = tf.multiply(f, factor)
        return f
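As used inside a PM force computation; state[0] holds particle positions, field is the painted density, and the factor typically carries the cosmological prefactor (value here illustrative):

delta_k = r2c3d(field, norm=nc**3)
forces = apply_longrange(state[0], delta_k, split=0, factor=1.5)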
Example #21
    def recon_prototype(linearflat):
        """
        """

        linear = tf.reshape(linearflat, data.shape)
        #

        #loss = tf.reduce_sum(tf.square(linear - minimum))

        state = lpt_init(linear, a0=0.1, order=1)
        final_state = nbody(state, stages, FLAGS.nc)
        final_field = cic_paint(tf.zeros_like(linear), final_state[0])

        residual = final_field - data.astype(np.float32)
        base = residual
        Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
        smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
        basek = r2c3d(base, norm=nc**3)
        basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
        base = c2r3d(basek, norm=nc**3)
        #
        chisq = tf.multiply(base, base)
        chisq = tf.reduce_sum(chisq)
        chisq = tf.multiply(chisq, 1 / nc**3, name='chisq')

        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
        prior = tf.multiply(prior, 1 / nc**3, name='prior')
        #
        loss = chisq + prior

        grad = tf.gradients(loss, linearflat)
        print(grad)
        return loss, grad[0]
Example #22
def recon_model(data, sigma=0.01**0.5, maxiter=100, anneal=False, dataovd=False, gtol=1e-5):

    #bs, nc = config['boxsize'], config['nc']
    kvec = flowpm.kernels.fftk([nc, nc, nc], symmetric=False)
    kmesh = sum(kk**2 for kk in kvec)**0.5
    priorwt = ipklin(kmesh) * bs ** -3 
    
    g = tf.Graph()

    with g.as_default():
        
        initlin = tf.placeholder(tf.float32, data.shape, name='initlin')
        linear = tf.get_variable('linmesh', shape=(nc, nc, nc), 
                             initializer=tf.random_normal_initializer(), trainable=True)
        initlin_op = linear.assign(initlin, name='initlin_op')
        #PM
        icstate = tfpm.lptinit(linear, FLAGS.a0, name='icstate')
        fnstate = tfpm.nbody(icstate, stages, nc, name='fnstate')
        final = tf.zeros_like(linear)
        final = cic_paint(final, fnstate[0], name='final')
        if dataovd:
            print('\nConverting final field to overdensity to match the data\n')
            fmean = tf.reduce_mean(final)
            final = tf.multiply(final, 1 / fmean)
            final = final - 1
        #
        #Prior
        lineark = r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1/priorwt))
        prior = tf.multiply(prior, 1/nc**3, name='prior')

        likelihood = tf.subtract(final, data)
        likelihood = tf.multiply(likelihood, 1/sigma)
        #galmean = tfp.distributions.Poisson(rate = plambda * (1 + finalfield))
        #logprob = galmean.log_prob(data)

        ##Anneal
        Rsm = tf.placeholder(tf.float32, name='smoothing')
        if anneal:
            print('\nAdding annealing part to graph\n')
            Rsm = tf.multiply(Rsm, bs/nc)
            Rsmsq = tf.multiply(Rsm, Rsm)
            smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
            likelihood = tf.squeeze(likelihood)
            likelihoodk = r2c3d(likelihood, norm=nc**3)
            likelihoodk = tf.multiply(likelihoodk, tf.cast(smwts, tf.complex64))
            residual = c2r3d(likelihoodk, norm=nc**3)
        else:
            residual = tf.identity(likelihood)
            
        chisq = tf.multiply(residual, residual)
        chisq = tf.reduce_sum(chisq)
        chisq = tf.multiply(chisq, 1/nc**3, name='chisq')

        loss = tf.add(chisq, prior, name='loss')
        
        #optimizer = ScipyOptimizerInterface(loss, var_list=[linear], method='L-BFGS-B', 
        #                                    options={'maxiter': maxiter, 'gtol':gtol})

        # tf.optimize.AdamWeightDecayOptimizer / apply_grads do not exist in stock
        # TensorFlow; plain Adam with apply_gradients is used so the graph builds.
        optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
        var_grads = tf.gradients([loss], [linear])
        update_ops = optimizer.apply_gradients(zip(var_grads, [linear]))

        
        tf.add_to_collection('inits', [initlin_op, initlin])
        #tf.add_to_collection('opt', optimizer)
        tf.add_to_collection('opt', update_ops)
        tf.add_to_collection('diagnostics', [prior, chisq, loss])
        tf.add_to_collection('reconpm', [linear, final, fnstate])
        tf.add_to_collection('data', data)
    return g
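A hedged driver sketch for the graph above, using the collections registered at the end; x0 is a hypothetical starting guess for the linear field:

g = recon_model(data)
with g.as_default(), tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    initlin_op, initlin = g.get_collection('inits')[0]
    sess.run(initlin_op, feed_dict={initlin: x0})      # seed the linear mesh
    update_ops = g.get_collection('opt')[0]
    for i in range(200):
        sess.run(update_ops, feed_dict={'smoothing:0': 2.0})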
Example #23
def recon_prototype(data,
                    anneal=True,
                    nc=FLAGS.nc,
                    bs=FLAGS.box_size,
                    batch_size=FLAGS.batch_size,
                    a0=FLAGS.a0,
                    a=FLAGS.af,
                    nsteps=FLAGS.nsteps,
                    dtype=tf.float32):
    """
    Prototype of function computing the LPT displacement.

    Returns output TensorFlow tensors.
    """
    if dtype == tf.float32:
        npdtype = "float32"
        cdtype = tf.complex64
    elif dtype == tf.float64:
        npdtype = "float64"
        cdtype = tf.complex128
    print(dtype, npdtype)

    #graph = mtf.Graph()
    #mesh = mtf.Mesh(graph, "my_mesh")

    linear = tf.get_variable('linmesh',
                             shape=(1, nc, nc, nc),
                             dtype=tf.float32,
                             initializer=tf.random_normal_initializer(),
                             trainable=True)

    state = lpt_init(linear, a0=0.1, order=1)
    final_state = nbody(state, stages, FLAGS.nc)
    final_field = cic_paint(tf.zeros_like(linear), final_state[0])

    residual = final_field - data.astype(np.float32)
    base = residual
    ##Anneal
    Rsm = tf.placeholder(tf.float32, name='smoothing')
    if anneal:
        #def anneal
        Rsmsq = tf.multiply(Rsm * bs / nc, Rsm * bs / nc)
        smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
        basek = r2c3d(base, norm=nc**3)
        basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
        base = c2r3d(basek, norm=nc**3)

    chisq = tf.multiply(base, base)
    chisq = tf.reduce_sum(chisq)
    #chisq = tf.multiply(chisq, 1/nc**3, name='chisq')

    #Prior
    lineark = r2c3d(linear, norm=nc**3)
    priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
    prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
    #prior = tf.multiply(prior, 1/nc**3, name='prior')
    #

    loss = chisq + prior

    ##    #optimizer = tf.optimize.AdamWeightDecayOptimizer(0.01)
    ##    opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    ##
    ##    # Compute the gradients for a list of variables.
    ##    grads_and_vars = opt.compute_gradients(loss, [linear])
    ##    print("\ngradients : ", grads_and_vars)
    ##    update_ops = opt.apply_gradients(grads_and_vars)
    ##
    ##    #optimizer = tf.keras.optimizers.Adam(0.01)
    ##    #var_grads = tf.gradients([loss], [linear])
    ##
    ##
    ##    #update_ops = optimizer.apply_gradients(var_grads, linear)
    ##    #update_ops = optimizer.apply_gradients(zip(var_grads, [linear]))
    ##    #update_ops = None
    ##    #lr = tf.placeholder(tf.float32, shape=())
    ##    #update_op = mtf.assign(fieldvar, fieldvar - var_grads[0]*lr)
    ##
    return linear, final_field, loss, chisq, prior