Example #1
# encoder network
with tf.sg_context(name='encoder', size=4, stride=2, act='relu'):
    z = (x
         .sg_conv(dim=64)
         .sg_conv(dim=128)
         .sg_flatten()
         .sg_dense(dim=1024)
         .sg_dense(dim=num_dim, act='linear'))

# decoder network
with tf.sg_context(name='decoder', size=4, stride=2, act='relu'):
    xx = (z
          .sg_dense(dim=1024)
          .sg_dense(dim=7*7*128)
          .sg_reshape(shape=(-1, 7, 7, 128))
          .sg_upconv(dim=64)
          .sg_upconv(dim=1, act='sigmoid'))

# add image summary
tf.sg_summary_image(x, name='origin')
tf.sg_summary_image(xx, name='recon')

# loss
loss = xx.sg_mse(target=x)


# do training
tf.sg_train(loss=loss, log_interval=10, ep_size=data.train.num_batch, save_dir='asset/train/sae')
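This snippet refers to x, data and num_dim, which are defined earlier in the original file. A minimal sketch of that setup, assuming sugartensor's bundled queue-based MNIST loader and illustrative hyper-parameter values (both are assumptions, not taken from the original):

import sugartensor as tf

batch_size = 32   # assumed batch size
num_dim = 50      # assumed latent dimension

# MNIST input pipeline (sugartensor ships a queue-based loader)
data = tf.sg_data.Mnist(batch_size=batch_size)

# input images, shape (batch_size, 28, 28, 1)
x = data.train.image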

Example #2
# continuous latent variable
z_con = tf.random_normal((batch_size, con_dim))
# random noise latent variable
z_rand = tf.random_normal((batch_size, rand_dim))
# full latent variable: one-hot categorical code + continuous code + noise
z = tf.concat([z_cat.sg_one_hot(depth=cat_dim), z_con, z_rand], 1)

#
# Computational graph
#

# generator
gen = generator(z)

# add image summary
tf.sg_summary_image(x, name='real')
tf.sg_summary_image(gen, name='fake')

# discriminator
disc_real, cat_real, _ = discriminator(x)
disc_fake, cat_fake, con_fake = discriminator(gen)

#
# loss
#

# discriminator loss
loss_d_r = disc_real.sg_bce(target=y_real, name='disc_real')
loss_d_f = disc_fake.sg_bce(target=y_fake, name='disc_fake')
loss_d = (loss_d_r + loss_d_f) / 2
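The listing is cut off after the discriminator loss; the generator-side objective is missing. One plausible way to finish it in the same style, assuming the usual all-ones/all-zeros targets, an integer categorical code and illustrative dimensions (all definitions below are assumptions for illustration, not the original code):

# assumed dimensions and discriminator targets
batch_size, cat_dim = 32, 10
y_real = tf.ones((batch_size,))    # label 1 for real images
y_fake = tf.zeros((batch_size,))   # label 0 for generated images

# assumed random categorical code (used above when building z)
z_cat = tf.random_uniform((batch_size,), minval=0, maxval=cat_dim, dtype=tf.int32)

# generator loss: make the discriminator call the fakes real
loss_g = disc_fake.sg_bce(target=y_real, name='gen')

# recognizer loss on the categorical code (InfoGAN-style mutual-information term)
loss_c = cat_fake.sg_ce(target=z_cat)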
Example #3
x_n = x * snr + tf.random_uniform(x.get_shape()) * (1 - snr)

#
# Computational graph
#

# encoder network
with tf.sg_context(name='encoder', size=4, stride=2, act='relu'):
    z = (x_n
         .sg_conv(dim=64)
         .sg_conv(dim=128)
         .sg_flatten()
         .sg_dense(dim=1024)
         .sg_dense(dim=num_dim, act='linear'))

# decoder network
with tf.sg_context(name='decoder', size=4, stride=2, act='relu'):
    xx = (z
          .sg_dense(dim=1024)
          .sg_dense(dim=7 * 7 * 128)
          .sg_reshape(shape=(-1, 7, 7, 128))
          .sg_upconv(dim=64)
          .sg_upconv(dim=1, act='sigmoid'))

# add image summary
tf.sg_summary_image(x, name='origin')
tf.sg_summary_image(x_n, name='noised')
tf.sg_summary_image(xx, name='recon')

# loss
loss = xx.sg_mse(target=x)

# do training
tf.sg_train(loss=loss,
            log_interval=10,
            ep_size=data.train.num_batch,
            save_dir='asset/train/dae')
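Once training finishes, the reconstructions can be inspected by restoring the checkpoint written to asset/train/dae. A minimal evaluation sketch, assuming the graph above is already built in the current process and that the sugartensor alias also exposes the plain TensorFlow 1.x namespace (as the snippet's own tf.random_uniform call suggests); none of this comes from the original example:

import numpy as np

saver = tf.train.Saver()
with tf.Session() as sess:
    # restore the latest checkpoint produced by tf.sg_train above
    saver.restore(sess, tf.train.latest_checkpoint('asset/train/dae'))

    # start the queue runners that feed x
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # pull one batch of clean, noised and reconstructed images
    clean, noised, recon = sess.run([x, x_n, xx])
    print(np.mean((clean - recon) ** 2))  # reconstruction MSE for this batch

    coord.request_stop()
    coord.join(threads)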
Example #4
y_disc = tf.concat([y, y * 0], 0)

#
# create generator
#
# I've used the ESPCN scheme
# http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Shi_Real-Time_Single_Image_CVPR_2016_paper.pdf
#

# generator network
with tf.sg_context(name='generator', act='relu', bn=True):
    gen = (x_small
           .sg_conv(dim=32)
           .sg_conv()
           .sg_conv(dim=4, act='sigmoid', bn=False)
           .sg_periodic_shuffle(factor=2))

# add image summary
tf.sg_summary_image(gen)

#
# input image pairs
#
x_real_pair = tf.concat([x_nearest, x], 3)
x_fake_pair = tf.concat([x_nearest, gen], 3)

#
# create discriminator & recognizer
#

# create real + fake image input
xx = tf.concat([x_real_pair, x_fake_pair], 0)

with tf.sg_context(name='discriminator', size=4, stride=2, act='leaky_relu'):
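    # The original listing is cut off here. One plausible continuation (a sketch,
    # not the original code): a shared convolutional trunk that branches into a
    # real/fake head and a recognizer head for the class label (10 MNIST classes
    # assumed).
    shared = (xx
              .sg_conv(dim=32)
              .sg_conv(dim=64)
              .sg_flatten())

    # real/fake logit per image, squeezed to shape (2 * batch_size,)
    disc = tf.squeeze(shared.sg_dense(dim=1, act='linear'))

    # class logits for the recognizer head
    recog = shared.sg_dense(dim=10, act='linear')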
Example #5
# create generator
#
# I've used the ESPCN scheme
# http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Shi_Real-Time_Single_Image_CVPR_2016_paper.pdf
#
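# The inputs x, x_small and x_nearest are defined earlier in the original file.
# A plausible sketch of that setup (an assumption, not taken from the example):
# MNIST images down-sampled to 14x14 as the generator input, plus a
# nearest-neighbour up-sampled copy used for the conditional image pairs below.
batch_size = 32                                   # assumed batch size
data = tf.sg_data.Mnist(batch_size=batch_size)    # sugartensor's MNIST loader
x = data.train.image                              # (batch, 28, 28, 1) target images
x_small = tf.image.resize_bicubic(x, (14, 14))    # low-resolution generator input
x_nearest = tf.image.resize_images(x_small, (28, 28),
                                   tf.image.ResizeMethod.NEAREST_NEIGHBOR)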

# generator network
with tf.sg_context(name='generator', act='relu', bn=True):
    gen = (x_small
           .sg_conv(dim=32)
           .sg_conv()
           .sg_conv(dim=4, act='sigmoid', bn=False)
           .sg_periodic_shuffle(factor=2))

# add image summary
tf.sg_summary_image(gen)

#
# input image pairs
#
x_real_pair = tf.concat([x_nearest, x], 3)
x_fake_pair = tf.concat([x_nearest, gen], 3)

#
# create discriminator & recognizer
#

# create real + fake image input
xx = tf.concat([x_real_pair, x_fake_pair], 0)

with tf.sg_context(name='discriminator', size=4, stride=2, act='leaky_relu'):