Esempio n. 1
0
def pm(config, verbose=True):
    """Build a TF graph running a full particle-mesh (PM) simulation.

    Generates a linear field from *config*, sets LPT initial conditions,
    evolves them with the N-body solver, and paints the final particle
    positions onto a mesh with CIC interpolation.

    Parameters
    ----------
    config : dict-like simulation configuration; must provide 'boxsize'.
    verbose : passed through to the N-body integrator.

    Returns
    -------
    tf.Graph containing named ops 'linear', 'icstate', 'fnstate', 'final'
    and a 'pm' collection holding those four tensors.
    """
    # Bug fix: `bs` was an undefined free name here; read the box size
    # from the configuration, as the sibling graph builders do.
    bs = config['boxsize']
    g = tf.Graph()
    with g.as_default():
        linear = tfpm.linfield(config, name='linear')
        icstate = tfpm.lptinit(linear, config, name='icstate')
        fnstate = tfpm.nbody(icstate, config, verbose=verbose, name='fnstate')
        final = tf.zeros_like(linear)
        final = tfpf.cic_paint(final, fnstate[0], boxsize=bs, name='final')
        tf.add_to_collection('pm', [linear, icstate, fnstate, final])
    return g
Esempio n. 2
0
def reconmodel(config, data, sigma=0.01**0.5, maxiter=100):
    """Build a graph that reconstructs the linear field from projected data.

    The trainable 'linmesh' variable is pushed through the PM forward
    model, summed along axis 0 and compared against ``data.sum(axis=0)``
    with a Gaussian chi-square; a power-spectrum prior regularises the
    linear modes.  An L-BFGS-B ``ScipyOptimizerInterface`` minimising
    ``chisq + prior`` is attached via the 'opt' collection.
    """
    boxsize, cells = config['boxsize'], config['nc']
    kgrid = sum(kk**2 for kk in config['kvec'])**0.5
    prior_wt = config['ipklin'](kgrid) * boxsize**-3

    graph = tf.Graph()

    with graph.as_default():

        # Placeholder + assign op so callers can seed the trainable mesh.
        lin_feed = tf.placeholder(tf.float32, data.shape, name='initlin')
        linmesh = tf.get_variable('linmesh',
                                  shape=(cells, cells, cells),
                                  initializer=tf.random_normal_initializer(),
                                  trainable=True)
        initlin_op = linmesh.assign(lin_feed, name='initlin_op')

        # Forward PM model: LPT initial conditions -> N-body -> CIC paint.
        state_ic = tfpm.lptinit(linmesh, config, name='icstate')
        state_fn = tfpm.nbody(state_ic, config, verbose=False, name='fnstate')
        model = tfpf.cic_paint(tf.zeros_like(linmesh), state_fn[0],
                               boxsize=boxsize, name='final')

        # Power-spectrum prior on the linear modes.
        link = tfpf.r2c3d(linmesh, norm=cells**3)
        modes_sq = tf.square(tf.cast(tf.abs(link), tf.float32))
        prior = tf.multiply(
            tf.reduce_sum(tf.multiply(modes_sq, 1 / prior_wt)),
            1 / cells**3, name='prior')

        # Chi-square on the axis-0 projection of data and model.
        proj_data = data.sum(axis=0)
        proj_model = tf.reduce_sum(model, axis=0)
        resid = tf.multiply(tf.subtract(proj_model, proj_data), 1 / sigma)
        chisq = tf.multiply(
            tf.reduce_sum(tf.multiply(resid, resid)),
            1 / cells**2, name='chisq')

        loss = tf.add(chisq, prior, name='loss')

        optimizer = ScipyOptimizerInterface(loss,
                                            var_list=[linmesh],
                                            method='L-BFGS-B',
                                            options={'maxiter': maxiter})

        tf.add_to_collection('utils', [initlin_op, lin_feed])
        tf.add_to_collection('opt', optimizer)
        tf.add_to_collection('diagnostics', [prior, chisq, loss])
        tf.add_to_collection('reconpm', [linmesh, model, state_fn, proj_model])
        tf.add_to_collection('data', [data, proj_data])
    return graph
Esempio n. 3
0
def graphlintomod(config, modpath, pad=False, ny=1):
    '''Return a graph that runs a PM sim from a fed linear mesh and then
    samples halo positions from a TF-Hub module.

    Parameters
    ----------
    config : simulation configuration; must provide 'boxsize' and 'nc'.
    modpath : path of the TF-Hub module exposing 'sample' and
        'loglikelihood' outputs.
    pad : int or False; if truthy, the painted mesh is periodically
        wrapped by `pad` cells on every axis before being fed in.
    ny : number of channels of the 'datamesh' placeholder.
    '''
    bs, nc = config['boxsize'], config['nc']

    g = tf.Graph()
    with g.as_default():
        module = hub.Module(modpath)

        linmesh = tf.placeholder(tf.float32, (nc, nc, nc), name='linmesh')
        datamesh = tf.placeholder(tf.float32, (nc, nc, nc, ny),
                                  name='datamesh')

        #PM: route the fed mesh through LPT + N-body, then CIC-paint it.
        linear = tf.Variable(0.)
        linear = tf.assign(linear,
                           linmesh,
                           validate_shape=False,
                           name='linear')
        icstate = tfpm.lptinit(linear, config, name='icstate')
        fnstate = tfpm.nbody(icstate, config, verbose=False, name='fnstate')
        final = tf.zeros_like(linear)
        final = tfpf.cic_paint(final, fnstate[0], boxsize=bs, name='final')
        #Sample
        if pad:
            # Periodic wrap of `pad` cells on each axis, then add batch
            # and channel dimensions.
            xx = tf.concat((final[-pad:, :, :], final, final[:pad, :, :]),
                           axis=0)
            xx = tf.concat((xx[:, -pad:, :], xx, xx[:, :pad, :]), axis=1)
            xx = tf.concat((xx[:, :, -pad:], xx, xx[:, :, :pad]), axis=2)
            xx = tf.expand_dims(tf.expand_dims(xx, 0), -1)
        else:
            # Bug fix: this branch previously read `xx = tf.assign(final)`,
            # which raises a TypeError (tf.assign requires both a ref and a
            # value) and never added the batch/channel dims the module
            # expects.  Mirror the padded branch instead.
            xx = tf.expand_dims(tf.expand_dims(final, 0), -1)

        yy = tf.expand_dims(datamesh, 0)
        # Call the module once and reuse its outputs; the original invoked
        # it twice, duplicating the module subgraph and its computation.
        outputs = module(dict(features=xx, labels=yy), as_dict=True)
        samples = tf.identity(outputs['sample'], name='samples')
        loglik = tf.identity(outputs['loglikelihood'], name='loglik')

        tf.add_to_collection('inits', [linmesh, datamesh])
        tf.add_to_collection('reconpm',
                             [linear, final, fnstate, samples, loglik])

    return g
Esempio n. 4
0
def graphpm(config, verbose=True, initlin=False):
    '''Return a graph performing a PM simulation.

    If *initlin* is False the graph generates its own initial conditions;
    if True it exposes a 'linmesh' placeholder whose contents are copied
    into the linear field at run time.
    '''
    bs, nc = config['boxsize'], config['nc']
    g = tf.Graph()
    with g.as_default():

        lin_feed = tf.placeholder(tf.float32, (nc, nc, nc), name='linmesh')
        if initlin:
            # Route the externally fed mesh through a variable so the rest
            # of the graph consumes it exactly like a generated field.
            holder = tf.Variable(0.)
            lin = tf.assign(holder, lin_feed, validate_shape=False,
                            name='linear')
        else:
            lin = tfpm.linfield(config, name='linear')
        state_ic = tfpm.lptinit(lin, config, name='icstate')
        state_fn = tfpm.nbody(state_ic, config, verbose=verbose,
                              name='fnstate')
        mesh = tfpf.cic_paint(tf.zeros_like(lin), state_fn[0],
                              boxsize=config['boxsize'], name='final')
        tf.add_to_collection('pm', [lin, state_ic, state_fn, mesh])
    return g
Esempio n. 5
0
def reconmodel(config,
               data,
               sigma=0.01**0.5,
               maxiter=100,
               gtol=1e-5,
               anneal=True,
               modpath=None,
               pad=None):
    """Build a reconstruction graph whose data term is the log-likelihood
    of a TF-Hub module, with optional annealing (Fourier-space Gaussian
    smoothing of the likelihood map).

    Parameters
    ----------
    config : simulation configuration; must provide 'boxsize', 'nc',
        'kvec' and 'ipklin'.
    data : label mesh fed to the module (shape assumed (nc, nc, nc) —
        TODO confirm against the module's signature).
    sigma : kept for interface compatibility; not used in this body.
    maxiter, gtol : L-BFGS-B options.
    anneal : if True, smooth the likelihood with the 'smoothing'
        placeholder scale before summing.
    modpath : TF-Hub module path.  Previously read from an undefined
        module-level global; now an explicit parameter.
    pad : periodic-padding width in cells.  Same legacy situation.
    """
    # Bug fix: `modpath` and `pad` were free (undefined) names in this
    # function.  Accept them as parameters, falling back to module globals
    # of the same name for backward compatibility with notebook usage.
    if modpath is None:
        modpath = globals().get('modpath')
    if pad is None:
        pad = globals().get('pad')
    if modpath is None or pad is None:
        raise ValueError("reconmodel: 'modpath' and 'pad' must be supplied "
                         "(as arguments or module-level globals)")

    bs, nc = config['boxsize'], config['nc']
    kmesh = sum(kk**2 for kk in config['kvec'])**0.5
    priorwt = config['ipklin'](kmesh) * bs**-3

    g = tf.Graph()

    with g.as_default():

        module = hub.Module(modpath)
        initlin = tf.placeholder(tf.float32, (nc, nc, nc), name='initlin')
        linear = tf.get_variable('linmesh',
                                 shape=(nc, nc, nc),
                                 initializer=tf.random_normal_initializer(
                                     mean=1.0, stddev=0.5),
                                 trainable=True)
        initlin_op = linear.assign(initlin, name='initlin_op')
        #PM forward model: LPT init -> N-body -> CIC paint.
        icstate = tfpm.lptinit(linear, config, name='icstate')
        fnstate = tfpm.nbody(icstate, config, verbose=False, name='fnstate')
        final = tf.zeros_like(linear)
        final = tfpf.cic_paint(final, fnstate[0], boxsize=bs, name='final')
        #
        # Periodic wrap of `pad` cells on every axis, then batch/channel
        # dimensions, to match the module's 'features' input.
        xx = tf.concat((final[-pad:, :, :], final, final[:pad, :, :]), axis=0)
        xx = tf.concat((xx[:, -pad:, :], xx, xx[:, :pad, :]), axis=1)
        xx = tf.concat((xx[:, :, -pad:], xx, xx[:, :, :pad]), axis=2)
        xx = tf.expand_dims(tf.expand_dims(xx, 0), -1)
        #Halos
        yy = tf.expand_dims(data, 0)

        print('xx, yy shape :', xx.shape, yy.shape)
        likelihood = module(dict(features=tf.cast(xx, tf.float32),
                                 labels=tf.cast(yy, tf.float32)),
                            as_dict=True)['loglikelihood']
        print(likelihood.shape)

        ##Anneal: optionally smooth the likelihood map in Fourier space
        # with a Gaussian of scale Rsm (fed via 'smoothing', converted to
        # box units via bs/nc).
        Rsm = tf.placeholder(tf.float32, name='smoothing')
        if anneal:
            Rsm = tf.multiply(Rsm, bs / nc)
            Rsmsq = tf.multiply(Rsm, Rsm)
            smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
            likelihood = tf.squeeze(likelihood)
            print(likelihood.shape)
            likelihoodk = tfpf.r2c3d(likelihood, norm=nc**3)
            likelihoodk = tf.multiply(likelihoodk,
                                      tf.cast(smwts, tf.complex64))
            likelihood = tfpf.c2r3d(likelihoodk, norm=nc**3)

        # Negative summed log-likelihood acts as the data term.
        residual = -tf.reduce_sum(likelihood)

        #Prior: power-spectrum weighting of the linear modes.
        lineark = tfpf.r2c3d(linear, norm=nc**3)
        priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
        prior = tf.reduce_sum(tf.multiply(priormesh, 1 / priorwt))
        prior = tf.multiply(prior, 1 / nc**3, name='prior')

        # NOTE(review): nc**0 == 1, so this is just a named identity that
        # exposes the data term under the 'chisq' op name.
        chisq = tf.multiply(residual, 1 / nc**0, name='chisq')

        loss = tf.add(chisq, prior, name='loss')

        optimizer = ScipyOptimizerInterface(loss,
                                            var_list=[linear],
                                            method='L-BFGS-B',
                                            options={
                                                'maxiter': maxiter,
                                                'gtol': gtol
                                            })

        tf.add_to_collection('inits', [initlin_op, initlin])
        tf.add_to_collection('opt', optimizer)
        tf.add_to_collection('diagnostics', [prior, chisq, loss])
        tf.add_to_collection('reconpm', [linear, final, fnstate])
        tf.add_to_collection('data', data)
    return g
Esempio n. 6
0
#Do reconstruction here
print('\nDo reconstruction\n')
tf.reset_default_graph()

# |k| magnitude on the mesh and the linear power spectrum sampled on it.
kmesh = sum(kk**2 for kk in config['kvec'])**0.5
priorwt = config['ipklin'](kmesh)
# priorwt
# NOTE(review): the reconmodel() builders scale the weight by bs**-3 and
# divide the prior by it, whereas below the spectrum MULTIPLIES the prior
# — confirm this difference is intentional.

# Trainable linear field to be reconstructed.
linear = tf.get_variable('linmesh',
                         shape=(nc, nc, nc),
                         initializer=tf.random_normal_initializer(),
                         trainable=True)
# Forward PM model: LPT initial conditions -> N-body -> CIC paint.
# (This variant passes an extra `grid` argument to lptinit — presumably a
# different tfpm API version; verify against the library in use.)
icstate = tfpm.lptinit(linear, grid, config)
fnstate = tfpm.nbody(icstate, config, verbose=False)
final = tf.zeros_like(linear)
final = tfpf.cic_paint(final, fnstate[0], boxsize=bs)
#

# Prior term built from the Fourier modes of the linear field.
lineark = tfpf.r2c3d(linear, norm=nc**3)
prior = tf.square(tf.cast(tf.abs(lineark), tf.float32))
prior = tf.reduce_sum(tf.multiply(prior, priorwt))
prior = tf.multiply(prior, 1 / nc**3)

# Assumed per-voxel noise level — TODO confirm against the data model.
sigma = 0.01**0.5

# Gaussian data residual, scaled by the inverse noise.
residual = tf.subtract(final, data)
residual = tf.multiply(residual, 1 / sigma)

# Smoothing-scale placeholder for annealing, fed at run time.
Rsm = tf.placeholder(tf.float32, name='smoothing')

#Rsm = tf.multiply(Rsm, bs/nc)
Esempio n. 7
0
# Build a simple PM pipeline: white-noise placeholder -> linear field
# (colored by the power spectrum) -> LPT -> N-body -> CIC-painted mesh.
config = Config(bs=100, nc=32, seed=200, pkfile=pkfile)
bs, nc = config['boxsize'], config['nc']
kmesh = sum(kk**2 for kk in config['kvec'])**0.5
pkmesh = config['ipklin'](kmesh)
print(bs, nc)

# White-noise mesh fed at run time; scaled by sqrt(P(k)/V) in Fourier
# space to produce the linear density field.
xx = tf.placeholder(tf.float32, (nc, nc, nc), name='white')
whitec = tfpf.r2c3d(xx, norm=nc**3)
lineark = tf.multiply(whitec, (pkmesh / bs**3)**0.5)
linear = tfpf.c2r3d(lineark, norm=nc**3, name='linear')
icstate = tfpm.lptinit(linear, config, name='icstate')
fnstate = tfpm.nbody(icstate, config, verbose=False, name='fnstate')
final = tf.zeros_like(linear)
final = tfpf.cic_paint(final,
                       fnstate[0],
                       boxsize=config['boxsize'],
                       name='final')

def relu(x):
    """Return a copy of array *x* with negative entries set to zero.

    Elementwise rectified linear unit: relu(x) = max(x, 0).  The input
    array is never modified.
    """
    mask = x < 0
    y = x.copy()
    # Bug fix: the previous `y[mask] *= 0` produced NaN for -inf entries
    # (since -inf * 0 is NaN under IEEE 754); direct assignment is the
    # correct ReLU for every input value.
    y[mask] = 0
    return y


with tf.Session() as sess:

    sess.run(tf.global_variables_initializer())
    for i in range(10):
        seed = i