Code Example #1
# imports added; module names follow the other excerpts from this project
import tensorflow as tf
import numpy as np
import models_mnist as models
import datetime
import my_utils

z_dim = 100
beta = 1  # diversity hyperparameter
# clip = 0.01
n_critic = 1 #
n_generator = 1
gan_type = "selective_sampling"
dir = "results/" + gan_type + "-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")

np.random.seed(0)
tf.set_random_seed(1234)

# restore = False
# ckpt_dir =

''' data '''
data_pool = my_utils.getMNISTDatapool(batch_size, keep=[4, 9])

""" graphs """
generator = models.ss_generator
discriminator = models.ss_discriminator
optimizer = tf.train.AdamOptimizer


# inputs
real = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
z = tf.placeholder(tf.float32, shape=[None, z_dim])


# generator
fake = generator(z, reuse=False, name="g1")
fake2 = generator(z, reuse=False, name="g2")
Code Example #2
import tensorflow as tf
import models_mnist as models
import datetime
import my_utils
""" param """
epoch = 100
batch_size = 64
lr = 0.0002
z_dim = 2
n_critic = 1  #
n_generator = 1
gan_type = "gan-v-gmm"
dir = "results/" + gan_type + "-" + datetime.datetime.now().strftime(
    "%Y%m%d-%H%M%S")
''' data '''
data_pool = my_utils.getMNISTDatapool(batch_size, keep=[0, 4, 5, 7])
""" graphs """
generator = models.generator
discriminator = models.ss_discriminator
classifier = models.multi_c_discriminator3
optimizer = tf.train.AdamOptimizer

# sample from a GMM prior
# with tf.variable_scope("gmm", reuse=False):
#     mu_1 = tf.get_variable("mean1", [z_dim], initializer=tf.constant_initializer(0))
#     mu_2 = tf.get_variable("mean2", [z_dim], initializer=tf.constant_initializer(0))
#     mu_3 = tf.get_variable("mean3", [z_dim], initializer=tf.constant_initializer(0))
#     mu_4 = tf.get_variable("mean4", [z_dim], initializer=tf.constant_initializer(0))
#     log_sigma_sq1 = tf.get_variable("log_sigma_sq1", [z_dim], initializer=tf.constant_initializer(0.001))
#     log_sigma_sq2 = tf.get_variable("log_sigma_sq2", [z_dim], initializer=tf.constant_initializer(0.001))
#     log_sigma_sq3 = tf.get_variable("log_sigma_sq3", [z_dim], initializer=tf.constant_initializer(0.001))
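
The commented-out block above sketches a learnable GMM prior over z (four means with diagonal log-variances; the fourth log-variance is cut off in the excerpt). A minimal sketch of how such a prior could be sampled with the reparameterization trick, assuming uniform mixture weights; `log_sigma_sq4` and the helper name are hypothetical.

# Sketch only: draw z from the learnable GMM prior above (uniform component weights assumed).
mus = tf.stack([mu_1, mu_2, mu_3, mu_4])                              # 4 x z_dim
log_sigma_sqs = tf.stack([log_sigma_sq1, log_sigma_sq2,
                          log_sigma_sq3, log_sigma_sq4])              # log_sigma_sq4 assumed

def sample_gmm_prior(n):
    idx = tf.random_uniform([n], minval=0, maxval=4, dtype=tf.int32)  # pick a component per sample
    mu = tf.gather(mus, idx)
    sigma = tf.sqrt(tf.exp(tf.gather(log_sigma_sqs, idx)))
    eps = tf.random_normal([n, z_dim])
    return mu + sigma * eps                                           # reparameterized sample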
Code Example #3
File: tests.py Project: wangmn93/ms-th-2018
    # temp = tf.expand_dims(tf.sqrt(tf.exp(softmaxs)), -1) # batch_size x 4 x 1
    r = tf.einsum('ib,ibk->ik', softmaxs, zs)
    sess = tf.Session()
    # t = tf.tile(tf.expand_dims(mus, 0),[10,1,1])
    print(zs.shape)
    print(softmaxs.shape)
    print(r.shape)
    # r = tf.reshape(r,[batch_size,z_dim])
    # print sess.run([t])

    print(sess.run([mus, r]))
    sess.close()

if 0:
    batch_size = 10
    data_pool_1 = my_utils.getMNISTDatapool(batch_size,
                                            keep=[0])  # range -1 ~ 1
    data_pool_2 = my_utils.getMNISTDatapool(batch_size, keep=[1])
    imgs_1 = data_pool_1.batch('img')
    imgs_2 = data_pool_2.batch('img')

    for i, j in zip(imgs_1, imgs_2):
        fig = plt.figure()

        fig.add_subplot(1, 2, 1)
        img = np.reshape(i, [28, 28])
        plt.imshow(img, cmap='gray')

        img_2 = np.reshape(j, [28, 28])
        fig.add_subplot(1, 2, 2)
        plt.imshow(img_2, cmap='gray')
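
For reference, `r = tf.einsum('ib,ibk->ik', softmaxs, zs)` in the snippet above takes a per-sample weighted combination of the candidate latent codes: r[i] = sum over b of softmaxs[i, b] * zs[i, b, :]. A small NumPy check of that identity (shapes are illustrative):

import numpy as np

batch, branches, z_dim = 10, 4, 2
softmaxs = np.random.dirichlet(np.ones(branches), size=batch)   # batch x branches, rows sum to 1
zs = np.random.randn(batch, branches, z_dim)                    # batch x branches x z_dim

r = np.einsum('ib,ibk->ik', softmaxs, zs)                       # batch x z_dim
assert np.allclose(r, (softmaxs[:, :, None] * zs).sum(axis=1))  # same weighted sum, written out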
Code Example #4
File: cat-multi-gan.py Project: wangmn93/ms-th-2018
# Replace the G in CatGAN with a multi-head G and modify G's objective
# so that each G (head) tries to minimize its own marginal entropy, while all Gs together try to maximize the marginal entropy.
# imports added to make the excerpt self-contained
import tensorflow as tf
import models_mnist as models
import datetime
import my_utils
from functools import partial

""" param """
epoch = 100
batch_size = 100
lr = 2e-4
beta1 = 0.5
z_dim = 128
n_critic = 1 #
n_generator = 1
gan_type = "cat-multi-gan"
dir = "results/" + gan_type + "-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")


''' data '''
data_pool = my_utils.getMNISTDatapool(batch_size, keep=[1, 3, 5])  # range -1 ~ 1
data_pool_2 = my_utils.getMNISTDatapool(batch_size, keep=[1])
data_pool_3 = my_utils.getMNISTDatapool(batch_size, keep=[3])
data_pool_4 = my_utils.getMNISTDatapool(batch_size, keep=[5])

""" graphs """
generator = partial(models.cat_generator_m, heads=3)
discriminator = partial(models.cat_conv_discriminator, out_dim=3)
# classifier = models.multi_c_discriminator
# discriminator2 = models.ss_discriminator
optimizer = tf.train.AdamOptimizer

# inputs
real = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
real_ = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
real_2 = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
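
A minimal sketch, not the project's code, of the marginal-entropy term referenced in the comment at the top of this example: the entropy of the average class distribution induced by a batch of softmax outputs from the 3-way discriminator. The function name and the epsilon constant are illustrative.

# Sketch only: marginal entropy H(mean_x p(y|x)) of a batch of softmax outputs.
def marginal_entropy(softmax_probs, eps=1e-8):
    # softmax_probs: batch_size x num_classes, rows sum to 1
    p_mean = tf.reduce_mean(softmax_probs, axis=0)              # average class distribution
    return -tf.reduce_sum(p_mean * tf.log(p_mean + eps))

# Per the comment above, each head would minimize this over its own samples,
# while the combined objective maximizes it over samples pooled from all heads.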
Code Example #5
# imports added; module names follow the other excerpts, numpy assumed for the GMM sampler below
import tensorflow as tf
import numpy as np
import models_mnist as models
import datetime
import my_utils


""" param """
epoch = 100
batch_size = 100
lr = 1e-4
z_dim = 2
n_critic = 1 #
n_generator = 1
gan_type = "aae"
dir = "results/" + gan_type + "-" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")


''' data '''
data_pool = my_utils.getMNISTDatapool(batch_size, keep=[0, 1, 9])  # range -1 ~ 1


""" graphs """
encoder = models.encoder
decoder = models.decoder
optimizer = tf.train.AdamOptimizer
discriminator = models.discriminator_for_latent
# classifier = models.ss_discriminator

# sample from a 3-component GMM prior in the 2-D latent space
k = 3
mus = [[0.5, 0.5], [-0.5, 0.5], [-0.5, -0.5]]
cov = [[1, 0], [0, 1]]

def sample_gmm(size, k=3):
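
The definition of `sample_gmm` is cut off in this excerpt. A sketch of what such a sampler could look like, assuming equal mixture weights over the `k` components given by `mus` and the shared covariance `cov`; the NumPy-based helper below is illustrative, not the project's implementation.

import numpy as np

# Sketch only: draw `size` points from an equally weighted k-component GMM.
def sample_gmm_sketch(size, k=3):
    idx = np.random.randint(0, k, size=size)                    # component index per sample
    return np.array([np.random.multivariate_normal(mus[i], cov) for i in idx])  # size x 2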
Code Example #6
File: aae.py Project: wangmn93/VaDE
# imports added to make the excerpt self-contained (the models module name is assumed)
import tensorflow as tf
import numpy as np
import models_mnist as models
import datetime
import my_utils
""" param """
epoch = 70
batch_size = 100
lr = 1e-3
z_dim = 10
n_critic = 1  #
n_generator = 1
gan_type = "aae"
dir = "results/" + gan_type + "-" + datetime.datetime.now().strftime(
    "%Y%m%d-%H%M%S")
X, Y = my_utils.load_data('mnist')
X = np.reshape(X, [70000, 28, 28, 1])
''' data '''
data_pool = my_utils.getMNISTDatapool(batch_size, shift=False)  # range 0 ~ 1
""" graphs """
encoder = models.encoder
decoder = models.decoder
optimizer = tf.train.AdamOptimizer
discriminator = models.discriminator_for_latent

# inputs
real = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
random_z = tf.placeholder(tf.float32, shape=[None, z_dim])

# encoder
z_mu, z_log_var = encoder(real, reuse=False)

# decoder
x_hat = decoder(z_mu, reuse=False)
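
The excerpt decodes the mean `z_mu` directly and stops before the adversarial part. A minimal sketch, not the project's code, of how an adversarial autoencoder is typically completed from here: sample the latent code with the reparameterization trick and let the latent discriminator separate prior samples (`random_z`) from encoder samples. The loss names and the `discriminator` call signature are assumptions.

# Sketch only: reparameterized latent code and AAE-style losses assumed for this excerpt.
eps = tf.random_normal(tf.shape(z_mu))
z = z_mu + tf.exp(0.5 * z_log_var) * eps                        # z ~ N(z_mu, exp(z_log_var))

real_logit = discriminator(random_z, reuse=False)               # prior sample -> "real"
fake_logit = discriminator(z, reuse=True)                       # encoder sample -> "fake"

def bce(logits, labels):
    return tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))

recon_loss = tf.reduce_mean(tf.square(real - x_hat))            # reconstruction term
d_loss = bce(real_logit, tf.ones_like(real_logit)) + bce(fake_logit, tf.zeros_like(fake_logit))
enc_adv_loss = bce(fake_logit, tf.ones_like(fake_logit))        # encoder fools the latent discriminator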