예제 #1
0
 def __init__(self, sess, img_h, img_w, img_c, op):
     """Set up the session, input pipeline, and network hyper-parameters.

     Args:
         sess: TensorFlow session used to run the graph.
         img_h: output image height.
         img_w: output image width.
         img_c: number of image channels.
         op: "train" builds the batched training pipeline from the pickled
             image objects; "test" builds a single-sentence placeholder.
     """
     #  input
     self.sess = sess
     self.output_height, self.output_width = img_h, img_w
     self.c_dim = img_c
     # raw sentence-embedding dimensionality (presumably skip-thought
     # vectors given the 4800 size -- TODO confirm against the encoder)
     self.orig_embed_size = 4800
     #  input batch
     self.op = op
     if self.op == "train":
         self.batch_size = 64
         print("loading training data......")
         # Pickle streams are binary: "rb" is required on Python 3 and
         # avoids newline corruption on Windows under Python 2.
         with open("./train_data/img_objs.pk", "rb") as f:
             img_objs = pk.load(f)
         self.data_size = len(img_objs)
         print("number of image {}".format(self.data_size))
         # floor division keeps batch_num an int on both Python 2 and 3
         self.batch_num = self.data_size // self.batch_size
         print("number of batch {}".format(self.batch_num))
         batch = data_reader.get_train_batch(img_objs, self.batch_size)
         self.img_batch = batch[0]
         self.wimg_batch = batch[1]
         self.match_embed_batch = batch[2]
         self.mismatch_embed_batch = batch[3]
     elif self.op == "test":
         self.batch_size = 1
         print("loading testing data")
         self.test_sent = tf.placeholder(
             tf.float32, shape=[self.batch_size, self.orig_embed_size])
     #  network setting
     self.gf_dim = 64
     self.df_dim = 64
     self.z_dim = 100
     self.embed_size = 128
     self.keep_prob = tf.placeholder(tf.float32)
     #  batch_norm of discriminator
     self.d_bn0 = batch_norm(name="d_bn0")
     self.d_bn1 = batch_norm(name="d_bn1")
     self.d_bn2 = batch_norm(name="d_bn2")
     self.d_bn3 = batch_norm(name="d_bn3")
     self.d_bn4 = batch_norm(name="d_bn4")
     #  batch_norm of generator
     self.g_bn0 = batch_norm(name="g_bn0")
     self.g_bn1 = batch_norm(name="g_bn1")
     self.g_bn2 = batch_norm(name="g_bn2")
     self.g_bn3 = batch_norm(name="g_bn3")
     #  build model
     print("building model......")
     self.build_model()
예제 #2
0
 def __init__(self, sess, img_h, img_w, img_c, op):
     """Set up the session, input pipeline, and network hyper-parameters.

     Args:
         sess: TensorFlow session used to run the graph.
         img_h: output image height.
         img_w: output image width.
         img_c: number of image channels.
         op: "train" builds the batched training pipeline from the pickled
             image objects; "test" builds a single-sentence placeholder.
     """
     #---input setting---#
     self.sess = sess
     self.op = op
     self.output_height, self.output_width = img_h, img_w
     self.c_dim = img_c
     # raw sentence-embedding dimensionality (presumably skip-thought
     # vectors given the 4800 size -- TODO confirm against the encoder)
     self.orig_embed_size = 4800
     #---training data---#
     if op == "train":
         self.batch_size = 64
         print("loading training data......")
         # Pickle streams are binary: "rb" is required on Python 3 and
         # avoids newline corruption on Windows under Python 2.
         with open("./train_data/img_objs_new.pk", "rb") as f:
             img_objs = pk.load(f)
         batch = data_reader.get_train_batch(img_objs, self.batch_size)
         self.rimg_batch = batch[0]
         self.wimg_batch = batch[1]
         self.match_embed_batch = batch[2]
         self.mismatch_embed_batch = batch[3]
     #---testing data---#
     if op == "test":
         self.batch_size = 1
         self.test_sent = tf.placeholder(
             tf.float32, shape=[1, self.orig_embed_size])
     #---model network setting---#
     self.gf_dim = 64
     self.df_dim = 64
     self.z_dim = 100
     self.embed_size = 128
     self.keep_prob = tf.placeholder(tf.float32)
     #---batch_norm of discriminator---#
     self.d_bn0 = batch_norm(name="d_bn0")
     self.d_bn1 = batch_norm(name="d_bn1")
     self.d_bn2 = batch_norm(name="d_bn2")
     self.d_bn3 = batch_norm(name="d_bn3")
     self.d_bn4 = batch_norm(name="d_bn4")
     #---batch_norm of generator---#
     self.g_bn0 = batch_norm(name="g_bn0")
     self.g_bn1 = batch_norm(name="g_bn1")
     self.g_bn2 = batch_norm(name="g_bn2")
     self.g_bn3 = batch_norm(name="g_bn3")
     #---build model---#
     print("building model......")
     self.build_model()
예제 #3
0
aim_mod = AIM_gen.AIM_gen()

# get data_reader
import data_reader

# Bind the instance to a distinct name so it does not shadow the
# data_reader module it was just imported from.
reader = data_reader.data_reader('outpt.txt')

BSIZE = 32
ITER_PER_EPOC = 200000 // BSIZE
EPOC = 50
MAX_ITER = ITER_PER_EPOC * EPOC

ETA = M.ETA(MAX_ITER)
ETA.start()
for iteration in range(MAX_ITER + 1):
    img, age_fake, age = reader.get_train_batch(BSIZE)
    losses = aim_mod.train(img, age_fake, age, normalize=True)
    # periodic progress report
    if iteration % 10 == 0:
        print('------ Iteration %d ---------' % iteration)
        aim_mod.display_losses(losses)
        print('ETA', ETA.get_ETA(iteration))
    # periodic checkpoint (skip the untrained iteration 0)
    if iteration % 1000 == 0 and iteration > 0:
        aim_mod.save('%d.ckpt' % iteration)

    # periodic qualitative sample dump
    if iteration % 100 == 0:
        gen = aim_mod.eval(img, age_fake)
        img = np.uint8(img)
        gen = np.uint8(gen)
        for i in range(BSIZE):
            cv2.imwrite('./res/%d_%d_1.jpg' % (iteration, i), img[i])
            # NOTE(review): `gen` is computed and converted above but never
            # written out -- presumably a matching imwrite of gen[i]
            # (e.g. '..._2.jpg') was intended. Left unchanged to preserve
            # behavior; TODO confirm with the author.
예제 #4
0
# get data_reader
import data_reader

# Bind the instance to a distinct name so it does not shadow the
# data_reader module it was just imported from.
reader = data_reader.data_reader('outpt.txt')

M.set_gpu('1')

BSIZE = 32
ITER_PER_EPOC = 200000 // BSIZE
EPOC = 30
MAX_ITER = ITER_PER_EPOC * EPOC

aim_mod = AIM.AIM(reader.age_class, reader.max_id)

ETA = M.ETA(MAX_ITER)
ETA.start()
for iteration in range(MAX_ITER + 1):
    img, target, uniform, age, idn = reader.get_train_batch(BSIZE)
    losses, generated = aim_mod.train(img,
                                      target,
                                      uniform,
                                      age,
                                      idn,
                                      normalize=True)
    # periodic progress report
    if iteration % 10 == 0:
        print('------ Iteration %d ---------' % iteration)
        aim_mod.display_losses(losses)
        print('ETA', ETA.get_ETA(iteration))
    # periodic checkpoint (skip the untrained iteration 0)
    if iteration % 1000 == 0 and iteration > 0:
        aim_mod.save('%d.ckpt' % iteration)