Example #1
def genIt(name='bird'):
    z = tf.random_normal((batch_size, rand_dim))
    gen = generator(z)
    with tf.Session() as sess:
        sess.run(
            tf.group(tf.global_variables_initializer(),
                     tf.sg_phase().assign(False)))
        tf.sg_restore(sess,
                      tf.train.latest_checkpoint('asset/train/gan'),
                      category=['generator', 'discriminator'])
        fake_features = []
        for i in range(100):
            fake_features.append(sess.run(gen))
    np.save('../data/fake_' + name + '_negative.npy',
            np.array(fake_features).reshape((-1, 4096)))
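These sugartensor snippets appear to assume import sugartensor as tf, import numpy as np, and module-level hyperparameters such as batch_size and rand_dim. As a quick sanity check, the negatives that genIt() writes out can be loaded back with plain NumPy (file name taken from the default name='bird' above):

import numpy as np

# load the fake features written by genIt('bird'): 100 batches flattened to (100 * batch_size, 4096)
fake = np.load('../data/fake_bird_negative.npy')
print(fake.shape, fake.dtype)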
Example #2
def trainIt():
    data = prepareData()
    x = data['train'][0]
    # x = data['train']
    z = tf.random_normal((batch_size, rand_dim))
    gen = generator(z)
    disc_real = discriminator(x)
    disc_fake = discriminator(gen)
    loss_d_r = disc_real.sg_mse(target=data['train'][1], name='disc_real')
    # loss_d_r = disc_real.sg_mse(target = tf.ones(batch_size), name = 'disc_real')
    loss_d_f = disc_fake.sg_mse(target=tf.zeros(batch_size), name='disc_fake')
    loss_d = (loss_d_r + loss_d_f) / 2
    loss_g = disc_fake.sg_mse(target=tf.ones(batch_size), name='gen')
    # train_disc = tf.sg_optim(loss_d, lr=0.01, name = 'train_disc', category = 'discriminator')  # discriminator train ops
    train_disc = tf.sg_optim(loss_d_r,
                             lr=0.01,
                             name='train_disc',
                             category='discriminator')  # discriminator train ops (optimizes loss_d_r only, unlike the commented loss_d version above)
    train_gen = tf.sg_optim(loss_g, lr=0.01,
                            category='generator')  # generator train ops

    @tf.sg_train_func
    def alt_train(sess, opt):
        if sess.run(tf.sg_global_step()) % 1 == 0:  # note: % 1 is always 0, so the discriminator branch runs on every step
            l_disc = sess.run([loss_d_r,
                               train_disc])[0]  # training discriminator
        else:
            l_disc = sess.run(loss_d)
        # l_gen = sess.run([loss_g, train_gen])[0]  # training generator
        # print np.mean(l_gen)
        return np.mean(l_disc)  #+ np.mean(l_gen)

    alt_train(log_interval=10,
              max_ep=25,
              ep_size=(1100 + 690) / batch_size,
              early_stop=False,
              save_dir='asset/train/gan',
              save_interval=10)
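Read together with Example #1, the intended flow appears to be train-then-sample; a minimal driver sketch under that assumption:

trainIt()           # train the GAN; checkpoints are written to asset/train/gan
genIt(name='bird')  # restore the latest checkpoint and dump fake features to ../data/fake_bird_negative.npy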
Example #3

# target continuous variable # 1
target_cval_1 = tf.placeholder(dtype=tf.sg_floatx, shape=batch_size)
# target continuous variable # 2
target_cval_2 = tf.placeholder(dtype=tf.sg_floatx, shape=batch_size)

# category variables
z = (tf.ones(batch_size, dtype=tf.sg_intx) *
     target_num).sg_one_hot(depth=cat_dim)

# continuous variables
z = z.sg_concat(
    target=[target_cval_1.sg_expand_dims(),
            target_cval_2.sg_expand_dims()])

# random seed = categorical variable + continuous variable + random normal
z = z.sg_concat(target=tf.random_normal((batch_size, rand_dim)))

# generator
gen = generator(z).sg_squeeze()


#
# run generator
#
def run_generator(num, x1, x2, fig_name='sample.png'):
    with tf.Session() as sess:

        tf.sg_init(sess)

        # restore parameters
        tf.sg_restore(sess,
Example #4
x = data.train.image
y = data.train.label

# labels for discriminator
y_real = tf.ones(batch_size)
y_fake = tf.zeros(batch_size)

# discriminator labels ( half 1s, half 0s )
y_disc = tf.concat(0, [y, y * 0])

# categorical latent variable
z_cat = tf.multinomial(
    tf.ones((batch_size, cat_dim), dtype=tf.sg_floatx) / cat_dim,
    1).sg_squeeze().sg_int()
# continuous latent variable
z_con = tf.random_normal((batch_size, con_dim))
# random latent variable dimension
z_rand = tf.random_normal((batch_size, rand_dim))
# latent variable
z = tf.concat(1, [z_cat.sg_one_hot(depth=cat_dim), z_con, z_rand])
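Since the logits passed to tf.multinomial are all equal, z_cat is simply a uniform draw over the cat_dim classes; a rough NumPy equivalent (same names assumed, numpy imported as np):

z_cat_np = np.random.randint(0, cat_dim, size=batch_size)  # one uniform class index per row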

#
# Computational graph
#

# generator
gen = generator(z)

# add image summary
tf.sg_summary_image(x, name='real')
tf.sg_summary_image(gen, name='fake')
Example #5
data = tf.sg_data.Mnist(batch_size=32)

# input images
x = data.train.image

#
# Computational graph
#

# encoder network
with tf.sg_context(name='encoder', size=4, stride=2, act='relu'):
    mu = (x.sg_conv(dim=64).sg_conv(dim=128).sg_flatten().sg_dense(
        dim=1024).sg_dense(dim=num_dim, act='linear'))

# re-parameterization trick with random gaussian
z = mu + tf.random_normal(mu.get_shape())

# decoder network
with tf.sg_context(name='decoder', size=4, stride=2, act='relu'):
    xx = (z.sg_dense(dim=1024).sg_dense(dim=7 * 7 * 128).sg_reshape(
        shape=(-1, 7, 7, 128)).sg_upconv(dim=64).sg_upconv(dim=1,
                                                           act='sigmoid'))

# add image summary
tf.sg_summary_image(x, name='origin')
tf.sg_summary_image(xx, name='recon')

# loss
loss_recon = xx.sg_mse(target=x, name='recon').sg_mean(axis=[1, 2, 3])
loss_kld = tf.square(mu).sg_sum(axis=1) / (28 * 28)
tf.sg_summary_loss(loss_kld, name='kld')
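Because the posterior above has fixed unit variance (z = mu + eps), the analytic KL divergence to a standard normal prior reduces to 0.5 * ||mu||^2 per sample; the snippet drops the 0.5 factor and rescales by the 28 * 28 image area, which only changes the relative weighting against the reconstruction loss. The textbook form would look something like:

loss_kld_analytic = 0.5 * tf.square(mu).sg_sum(axis=1)  # KL( N(mu, I) || N(0, I) ) for a unit-variance posterior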
Example #6
                       reuse=reuse):

        # generator network
        res = (tensor.sg_dense(dim=1024, name='fc1').sg_dense(
            dim=7 * 7 * 128,
            name='fc2').sg_reshape(shape=(-1, 7, 7, 128)).sg_upconv(
                dim=64, name='conv1').sg_upconv(dim=1,
                                                act='sigmoid',
                                                bn=False,
                                                name='conv2'))

        return res


# random normal seed
z = tf.random_normal((batch_size, rand_dim))

# generator
gen = generator(z).sg_squeeze()

#
# draw samples
#

with tf.Session() as sess:

    tf.sg_init(sess)

    # restore parameters
    tf.sg_restore(sess,
                  tf.train.latest_checkpoint('asset/train/gan'),
Example #7
#
# inputs
#
# MNIST input tensor ( with QueueRunner )
data = tf.sg_data.Mnist(batch_size=32)

# input images
x = data.train.image

# labels for discriminator
y_real = tf.ones(batch_size)
y_fake = tf.zeros(batch_size)

# random gaussian seed
z = tf.random_normal((data.batch_size, rand_dim))

#
# Computational graph
#
# generator
gen = generator(z)

# add image summary
tf.sg_summary_image(x, name='real')
tf.sg_summary_image(gen, name='fake')

# discriminator
disc_real = discriminator(x)
disc_fake = discriminator(gen)
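This snippet is cut off before the losses; following the pattern of Example #2, the discriminator and generator losses would typically be built from disc_real / disc_fake against y_real / y_fake, roughly like this sketch:

loss_disc = (disc_real.sg_mse(target=y_real, name='disc_real') +
             disc_fake.sg_mse(target=y_fake, name='disc_fake')) / 2  # discriminator: real -> 1, fake -> 0
loss_gen = disc_fake.sg_mse(target=y_real, name='gen')              # generator: make fakes look real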
Example #8
# hyper parameters
#

batch_size = 100  # batch size
num_dim = 50  # latent dimension

#
# inputs
#

#
# Computational graph
#

# random gaussian seed
z = tf.random_normal((batch_size, num_dim))

# decoder network
with tf.sg_context(name='decoder', size=4, stride=2, act='relu'):
    gen = (z.sg_dense(dim=1024).sg_dense(dim=7 * 7 * 128).sg_reshape(
        shape=(-1, 7, 7,
               128)).sg_upconv(dim=64).sg_upconv(dim=1,
                                                 act='sigmoid').sg_squeeze())
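The two stride-2 upconvolutions upsample the 7x7 feature map to 14x14 and then 28x28, so after sg_squeeze gen holds a batch of MNIST-sized samples; a quick check:

print(gen.get_shape())  # roughly (batch_size, 28, 28) once the channel dimension is squeezed out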

#
# draw samples
#

with tf.Session() as sess:

    tf.sg_init(sess)
Example #9
File: nn.py  Project: jackyzha0/vybe

def t_get_indices(batchsize):
    # return a shuffled permutation of the indices 0 .. batchsize-1
    index = np.arange(batchsize)
    np.random.shuffle(index)
    return index


## Training Loop
sd = 1 / np.sqrt(num_features)
with tf.name_scope('input'):
    X = tf.placeholder(tf.float32, [None, num_features], name="x_inp")
    Y = tf.placeholder(tf.float32, [None, num_classes], name="y_inp")

W_1 = tf.Variable(
    tf.random_normal([num_features, n_hidden_units_one], mean=0, stddev=sd))
b_1 = tf.Variable(tf.random_normal([n_hidden_units_one], mean=0, stddev=sd))
h_1 = tf.nn.tanh(tf.matmul(X, W_1) + b_1)

W_2 = tf.Variable(
    tf.random_normal([n_hidden_units_one, n_hidden_units_two],
                     mean=0,
                     stddev=sd))
b_2 = tf.Variable(tf.random_normal([n_hidden_units_two], mean=0, stddev=sd))
h_2 = tf.nn.tanh(tf.matmul(h_1, W_2) + b_2)

W_3 = tf.Variable(
    tf.random_normal([n_hidden_units_two, n_hidden_units_three],
                     mean=0,
                     stddev=sd))
b_3 = tf.Variable(tf.random_normal([n_hidden_units_three], mean=0, stddev=sd))
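For context, a minimal sketch of how t_get_indices above could drive shuffled minibatch training once an output layer and a train_step op are defined (train_x, train_y, batch_size, num_epochs, and train_step are assumptions, not part of this snippet):

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(num_epochs):
        indices = t_get_indices(train_x.shape[0])  # one shuffled pass over the training set
        for start in range(0, len(indices), batch_size):
            batch = indices[start:start + batch_size]
            sess.run(train_step, feed_dict={X: train_x[batch], Y: train_y[batch]})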