Example 1
def linfieldlocal(white, config, name='linfield'):
    '''Generate a linear field with a given linear power spectrum.'''

    bs, nc = config['boxsize'], config['nc']
    # Interpolate the linear power spectrum onto the Fourier-space grid.
    kmesh = sum(kk**2 for kk in config['kvec'])**0.5
    pkmesh = config['ipklin'](kmesh)
    # Scale the white-noise field by sqrt(P(k)/V) in Fourier space and
    # transform back to real space.
    whitec = tfpf.r2c3d(white, norm=nc**3)
    lineark = tf.multiply(whitec, (pkmesh / bs**3)**0.5)
    linear = tfpf.c2r3d(lineark, norm=nc**3, name=name)
    return linear
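
A minimal usage sketch (not part of the original example; it borrows Config, pkfile, and the module-level tfpf/tfpm imports from Example 4 below):

# Usage sketch: draw a white-noise realization and evaluate the linear field.
import numpy as np
import tensorflow as tf

config = Config(bs=100, nc=32, seed=200, pkfile=pkfile)  # as in Example 4
nc = config['nc']

white = tf.placeholder(tf.float32, (nc, nc, nc), name='white')
linear = linfieldlocal(white, config)

with tf.Session() as sess:
    whitenoise = np.random.normal(size=(nc, nc, nc)).astype(np.float32)
    linfield = sess.run(linear, feed_dict={white: whitenoise})
    print(linfield.shape)  # (nc, nc, nc)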
Example 2
def reconmodel(config,
               data,
               sigma=0.01**0.5,
               maxiter=100,
               gtol=1e-5,
               anneal=True):

    bs, nc = config['boxsize'], config['nc']
    kmesh = sum(kk**2 for kk in config['kvec'])**0.5
    priorwt = config['ipklin'](kmesh) * bs**-3

    g = tf.Graph()

    with g.as_default():

        # modpath (assumed defined at module level) points to the trained
        # TF-Hub module that evaluates the halo log-likelihood.
        module = hub.Module(modpath)
        initlin = tf.placeholder(tf.float32, (nc, nc, nc), name='initlin')
        linear = tf.get_variable('linmesh',
                                 shape=(nc, nc, nc),
                                 initializer=tf.random_normal_initializer(
                                     mean=1.0, stddev=0.5),
                                 trainable=True)
        initlin_op = linear.assign(initlin, name='initlin_op')
        # Particle-mesh forward model: LPT initial conditions, N-body
        # evolution, then CIC-paint the particles onto the final mesh.
        icstate = tfpm.lptinit(linear, config, name='icstate')
        fnstate = tfpm.nbody(icstate, config, verbose=False, name='fnstate')
        final = tf.zeros_like(linear)
        final = tfpf.cic_paint(final, fnstate[0], boxsize=bs, name='final')
        #
        #xx = tf.reshape(final, shape=[-1, cube_sizeft, cube_sizeft, cube_sizeft, nchannels], name='input')
        # Periodically pad the final field on all three axes ('pad' is the
        # padding width, assumed defined at module level), then add batch
        # and channel dimensions expected by the hub module.
        xx = tf.concat((final[-pad:, :, :], final, final[:pad, :, :]), axis=0)
        xx = tf.concat((xx[:, -pad:, :], xx, xx[:, :pad, :]), axis=1)
        xx = tf.concat((xx[:, :, -pad:], xx, xx[:, :, :pad]), axis=2)
        xx = tf.expand_dims(tf.expand_dims(xx, 0), -1)
        # Halo (target) data with a leading batch dimension.
        #yy = tf.reshape(data, shape=[-1, cube_size, cube_size, cube_size, 1], name='labels')
        yy = tf.expand_dims(data, 0)

        print('xx, yy shape :', xx.shape, yy.shape)
        likelihood = module(dict(features=tf.cast(xx, tf.float32),
                                 labels=tf.cast(yy, tf.float32)),
                            as_dict=True)['loglikelihood']
        print(likelihood.shape)

        ## Annealing: smooth the likelihood with a Gaussian kernel of scale
        ## Rsm (given in mesh units, converted to box units) in Fourier space.
        Rsm = tf.placeholder(tf.float32, name='smoothing')
        if anneal:
            Rsm = tf.multiply(Rsm, bs / nc)
            Rsmsq = tf.multiply(Rsm, Rsm)
            smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
            likelihood = tf.squeeze(likelihood)
            print(likelihood.shape)
            likelihoodk = tfpf.r2c3d(likelihood, norm=nc**3)
            likelihoodk = tf.multiply(likelihoodk,
                                      tf.cast(smwts, tf.complex64))
            likelihood = tfpf.c2r3d(likelihoodk, norm=nc**3)

        # Negative log-likelihood: the chi-square part of the loss.
        residual = -tf.reduce_sum(likelihood)

        # Gaussian prior on the linear modes, weighted by the linear power.
        lineark = tfpf.r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
        prior = tf.multiply(prior, 1 / nc**3, name='prior')

        # Note: nc**0 == 1, so the residual is kept unscaled here.
        chisq = tf.multiply(residual, 1 / nc**0, name='chisq')

        loss = tf.add(chisq, prior, name='loss')

        optimizer = ScipyOptimizerInterface(loss,
                                            var_list=[linear],
                                            method='L-BFGS-B',
                                            options={
                                                'maxiter': maxiter,
                                                'gtol': gtol
                                            })

        tf.add_to_collection('inits', [initlin_op, initlin])
        tf.add_to_collection('opt', optimizer)
        tf.add_to_collection('diagnostics', [prior, chisq, loss])
        tf.add_to_collection('reconpm', [linear, final, fnstate])
        tf.add_to_collection('data', data)
    return g
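
A sketch of driving the returned graph (an assumption, not the original driver: the annealing schedule and the diagnostics printout below are illustrative; ScipyOptimizerInterface.minimize is the tf.contrib.opt API):

g = reconmodel(config, data, maxiter=100, anneal=True)

with g.as_default(), tf.Session() as session:
    session.run(tf.global_variables_initializer())
    Rsm = g.get_tensor_by_name('smoothing:0')
    optimizer = tf.get_collection('opt')[0]
    prior, chisq, loss = tf.get_collection('diagnostics')[0]

    # Anneal: optimize at progressively smaller smoothing scales (illustrative).
    for R0 in [4.0, 2.0, 1.0, 0.5, 0.0]:
        optimizer.minimize(session, feed_dict={Rsm: R0})
        print(session.run([chisq, prior, loss], {Rsm: R0}))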
Example 3
        ##        gradandvars_prior = optimizer.compute_gradients(prior, linmesh)
        ##        opt_op = optimizer.apply_gradients(grads_and_vars1, name='apply_grad')
        ##
        gradandvars_new = []

        # Combine the chi-square and prior gradients for each variable;
        # the chi-square gradient is optionally smoothed below.
        for i in range(len(gradandvars_chisq)):
            g1, v = gradandvars_chisq[i]
            g2, _ = gradandvars_prior[i]

            # When annealing over several smoothing scales, low-pass filter
            # the chi-square gradient with the kernel exp(-0.5 k^2 R^2).
            if len(R0s) > 1:
                gk = tfpf.r2c3d(g1, norm=nc**3)
                smwts = tf.exp(
                    tf.multiply(-0.5 * kmesh**2,
                                tf.multiply(Rsm * bs / nc, Rsm * bs / nc)))
                gk = tf.multiply(gk, tf.cast(smwts, tf.complex64))
                g1 = tfpf.c2r3d(gk, norm=nc**3)

            gradandvars_new.append((g1 + g2, v))

        applygrads = optimizer.apply_gradients(gradandvars_new)

        if datainit:
            # Optionally derive the initial linear mesh from the data.
            initval = np.exp(session.run(samples, {Rsm: 0}))

        if initval is not None:
            print('Do init')
            initlinop = g.get_operation_by_name('initlin_op')
            initlin = g.get_tensor_by_name('initlin:0')
            session.run(initlinop, {initlin: initval})

        def checkiter(mode, optfolder, R0=0):
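
The snippet above is truncated at checkiter. A minimal sketch isolating the gradient-smoothing step it performs (smooth_gradient is a hypothetical helper name; tfpf is the same FFT utility module used throughout these examples):

def smooth_gradient(grad, kmesh, R, bs, nc):
    # Low-pass filter a real-space gradient with the Gaussian kernel
    # exp(-0.5 k^2 (R * bs/nc)^2) in Fourier space, as in the loop above.
    gk = tfpf.r2c3d(grad, norm=nc**3)
    smwts = tf.exp(-0.5 * kmesh**2 * (R * bs / nc)**2)
    gk = tf.multiply(gk, tf.cast(smwts, tf.complex64))
    return tfpf.c2r3d(gk, norm=nc**3)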
Example 4

# pkfile (assumed defined earlier) is the path to the tabulated linear
# power spectrum.
config = Config(bs=100, nc=32, seed=200, pkfile=pkfile)
bs, nc = config['boxsize'], config['nc']
kmesh = sum(kk**2 for kk in config['kvec'])**0.5
pkmesh = config['ipklin'](kmesh)
print(bs, nc)

# Forward model: white noise -> linear field -> LPT -> N-body -> CIC painting.
xx = tf.placeholder(tf.float32, (nc, nc, nc), name='white')
whitec = tfpf.r2c3d(xx, norm=nc**3)
lineark = tf.multiply(whitec, (pkmesh / bs**3)**0.5)
linear = tfpf.c2r3d(lineark, norm=nc**3, name='linear')
icstate = tfpm.lptinit(linear, config, name='icstate')
fnstate = tfpm.nbody(icstate, config, verbose=False, name='fnstate')
final = tf.zeros_like(linear)
final = tfpf.cic_paint(final,
                       fnstate[0],
                       boxsize=config['boxsize'],
                       name='final')


def relu(x):
    '''Element-wise ReLU for a numpy array.'''
    mask = x < 0
    y = x.copy()
    y[mask] = 0
    return y
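
A sketch of running the forward model above on one white-noise realization (the realization and the printed summary are illustrative assumptions):

import numpy as np

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    whitenoise = np.random.normal(size=(nc, nc, nc)).astype(np.float32)
    linfield, finalfield = sess.run([linear, final], feed_dict={xx: whitenoise})
    print(linfield.std(), finalfield.mean())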