def main():
    """Sample random images from a pre-trained generator.

    Draws Gaussian latent codes, pushes them through the generator and
    writes the resulting image grids to disk, one grid per iteration.
    """
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    tflib.init_tf({'rnd.np_random_seed': 1})
    assert os.path.exists(args.restore_path)
    _, _, _, Gs, _ = load_pkl(args.restore_path)
    # Synthesis input is assumed [batch, num_layers, latent_dim] — TODO confirm.
    latent_dim = Gs.components.synthesis.input_shape[2]
    print(f'Latent dimension shape: {latent_dim}')

    # Building graph
    z_input = tf.placeholder('float32', [None, latent_dim], name='Gaussian')
    print(f'Z in tensorflow graph: {z_input.shape}')
    sampler = Gs.get_output_for(z_input, None, randomize_noise=True)
    sess = tf.get_default_session()

    save_dir = args.output_dir or './outputs/sampling'
    os.makedirs(save_dir, exist_ok=True)

    print('Sampling...')
    for step in tqdm(range(args.total_nums)):
        noise = np.random.randn(args.batch_size * 2, latent_dim)
        batch = sess.run(sampler, {z_input: noise})
        # NCHW -> NHWC before writing images.
        batch = batch.transpose(0, 2, 3, 1)
        print(f'shape of output: {batch.shape}')
        imwrite(immerge(batch, 2, args.batch_size),
                '%s/sampling_%06d_newseed.png' % (save_dir, step))
# Example #2
def main():
    """Reconstruct test images through the encoder and generator.

    Encodes each batch of real images into a W code, broadcasts it over
    all synthesis layers, decodes it back to images, and saves grids of
    originals (top row) over reconstructions (bottom row).
    """
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    tflib.init_tf({'rnd.np_random_seed': 1000})
    assert os.path.exists(args.restore_path)
    E, _, _, Gs, _ = load_pkl(args.restore_path)
    num_layers = Gs.components.synthesis.input_shape[1]

    # Building graph
    real = tf.placeholder('float32', [None, 3, args.image_size, args.image_size], name='real_image')
    w_codes = E.get_output_for(real, phase=False)
    # Repeat the single W vector once per synthesis layer.
    w_broadcast = tf.tile(w_codes[:, np.newaxis], [1, num_layers, 1])
    reconstructor = Gs.components.synthesis.get_output_for(w_broadcast, randomize_noise=False)
    sess = tf.get_default_session()

    # Preparing data
    input_images, _ = preparing_data(im_path=args.data_dir_test, img_type=args.img_type)

    save_dir = args.output_dir or './outputs/reconstruction'
    os.makedirs(save_dir, exist_ok=True)

    print('Reconstructing...')
    for it, start in tqdm(enumerate(range(0, input_images.shape[0], args.batch_size))):
        originals = input_images[start:start + args.batch_size]
        recons = sess.run(reconstructor, feed_dict={real: originals})
        # Stack originals over reconstructions and convert NCHW -> NHWC.
        grid = np.concatenate([originals, recons], axis=0)
        grid = grid.transpose(0, 2, 3, 1)
        imwrite(immerge(grid, 2, originals.shape[0]),
                '%s/reconstruction_%06d.png' % (save_dir, it))
def main():
    """Decode saved W codes back into images.

    Loads a batch of latent codes from disk, broadcasts each code over
    the synthesis layers, decodes them, and writes a single image grid.
    """
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    tflib.init_tf({'rnd.np_random_seed': 1000})
    assert os.path.exists(args.restore_path)
    _, _, _, Gs, _ = load_pkl(args.restore_path)
    num_layers = Gs.components.synthesis.input_shape[1]

    batch_codes = np.load(args.data_dir_encode)
    print(batch_codes.shape)
    latent_dim = batch_codes.shape[1]
    print(f'Latent dimension shape: {latent_dim}')

    # Building graph
    w_input = tf.placeholder('float32', [None, latent_dim], name='w_codes')
    print(f'W in tensorflow graph: {w_input.shape}')
    # Repeat the W vector once per synthesis layer.
    w_tiled = tf.tile(w_input[:, np.newaxis], [1, num_layers, 1])
    print(f'encoder_w_tile size: {w_tiled.shape}')
    decoder = Gs.components.synthesis.get_output_for(
        w_tiled, randomize_noise=False)
    sess = tf.get_default_session()

    save_dir = args.output_dir or './outputs/rebuild_encodings'
    os.makedirs(save_dir, exist_ok=True)

    print('Creating Images...')
    decoded = sess.run(decoder, {w_input: batch_codes})
    # NCHW -> NHWC before writing the grid.
    decoded = decoded.transpose(0, 2, 3, 1)
    print(f'shape of output: {decoded.shape}')
    imwrite(immerge(decoded, 4, args.batch_size),
            '%s/decode_00000_new1.png' % (save_dir))
def main():
    """Main function.

    Edits real images along a semantic boundary in W space: each test
    image is encoded to a W code, shifted along the boundary direction
    over a range of distances, decoded back to images, and saved as one
    row (original followed by the edited sequence).
    """
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    tf_config = {'rnd.np_random_seed': 1000}
    tflib.init_tf(tf_config)
    assert os.path.exists(args.restore_path)
    assert os.path.exists(args.boundary)
    E, _, _, Gs, _ = load_pkl(args.restore_path)
    # Synthesis input is [batch, num_layers, latent_dim].
    num_layers, latent_dim = Gs.components.synthesis.input_shape[1:3]

    # Building graph
    real = tf.placeholder('float32',
                          [None, 3, args.image_size, args.image_size],
                          name='real_image')
    W = tf.placeholder('float32', [None, num_layers, latent_dim],
                       name='Gaussian')
    encoder_w = E.get_output_for(real, phase=False)
    reconstruction_from_w = Gs.components.synthesis.get_output_for(
        W, randomize_noise=False)
    sess = tf.get_default_session()

    # Preparing data
    input_images, images_name = preparing_data(im_path=args.data_dir_test,
                                               img_type=args.img_type)

    boundary = np.load(args.boundary)
    # e.g. 'path/age_boundary.npy' -> 'age'
    boundary_name = args.boundary.split('/')[-1].split('_')[0]

    save_dir = args.output_dir or './outputs/manipulation'
    os.makedirs(save_dir, exist_ok=True)

    print('manipulation in w space on %s' % (boundary_name))
    for i in tqdm(range(input_images.shape[0])):
        input_image = input_images[i:i + 1]
        im_name = images_name[i]
        # Encode the image into its W-space latent code.
        latent_code = sess.run(encoder_w, feed_dict={real: input_image})
        codes = manipulate(latent_code,
                           boundary,
                           num_layers=num_layers,
                           step=args.step,
                           start_distance=args.start_distance,
                           end_distance=args.end_distance)
        # Fixed-size buffer keeps the fed batch shape constant; a short
        # final batch is zero-padded and the padded outputs are discarded
        # via the images[0:len(batch)] slice below.
        inputs = np.zeros((args.batch_size, num_layers, latent_dim),
                          np.float32)
        output_images = []
        for idx in range(0, args.step, args.batch_size):
            batch = codes[idx:idx + args.batch_size]
            inputs[0:len(batch)] = batch
            images = sess.run(reconstruction_from_w, feed_dict={W: inputs})
            output_images.append(images[0:len(batch)])
        output_images = np.concatenate(output_images, axis=0)
        # Prepend the original, convert NCHW -> NHWC, save as one row.
        output_images = np.concatenate([input_image, output_images], axis=0)
        output_images = output_images.transpose(0, 2, 3, 1)
        imwrite(immerge(output_images, 1, args.step + 1),
                '%s/%s_%s.png' % (save_dir, im_name, boundary_name))
# Example #5
def reconstruction(sess, images, real, reconstructor, save_dir):
    """Reconstruct image files in batches and save comparison grids.

    Args:
        sess: TensorFlow session used to run the graph.
        images: List of image file paths to reconstruct.
        real: Placeholder fed with the input batch (NCHW, range [-1, 1]).
        reconstructor: Output tensor producing the reconstructions.
        save_dir: Directory the result grids are written to.

    NOTE(review): relies on the module-level ``args`` for the batch size.
    """
    # enumerate replaces the original hand-maintained `it` counter.
    for it, image_id in enumerate(range(0, len(images), args.batch_size)):
        images_name = images[image_id:image_id + args.batch_size]
        # cv2 loads BGR; reverse the channel axis to get RGB.
        batch_images = np.asarray(
            [cv2.imread(im_name)[:, :, ::-1] for im_name in images_name])
        batch_images = adjust_dynamic_range(batch_images.astype(np.float32), [0, 255], [-1., 1.])
        rec = sess.run(reconstructor, feed_dict={real: batch_images.transpose(0, 3, 1, 2)})
        rec = rec.transpose(0, 2, 3, 1)
        rec = np.clip(rec, -1., 1.)
        # Top row: originals, bottom row: reconstructions.
        orin_recon = np.concatenate([batch_images, rec], axis=0)
        imwrite(immerge(orin_recon, 2, len(images_name)), '%s/iter_%06d.png' % (save_dir, it))
# Example #6
def manipulation(sess, images, real, Z, get_w_from_img, get_img_from_w, boundaries, save_dir, steps=11, start_distance=-5., end_distance=5.):
    """Edit images along semantic boundaries in W space.

    For each boundary file, every image is encoded to a W code, shifted
    along the boundary over ``steps`` evenly spaced distances between
    ``start_distance`` and ``end_distance``, decoded back to images, and
    saved as one row (original followed by the edited sequence).
    """
    offsets = np.linspace(start_distance, end_distance, steps)
    offsets = offsets.reshape(-1, 1).astype(np.float32)
    for boundary in boundaries:
        # e.g. 'path/00_age_boundary.npy' -> 'age'
        attr = boundary.split('/')[-1].split('_')[1]
        print('manipulating on %s' % (attr))
        # One latent offset per interpolation step.
        directions = np.load(boundary) * offsets
        for image in tqdm(images):
            # cv2 loads BGR; reverse the channel axis to get RGB.
            rgb = cv2.imread(image)[:, :, ::-1][np.newaxis]
            stem = image.split('/')[-1].split('.')[0]
            rgb = adjust_dynamic_range(rgb.astype(np.float32), [0, 255], [-1., 1.])
            latent_w = sess.run(get_w_from_img, feed_dict={real: rgb.transpose(0, 3, 1, 2)})
            edited = sess.run(get_img_from_w, feed_dict={Z: latent_w + directions})
            edited = edited.transpose(0, 2, 3, 1)
            strip = np.concatenate([rgb, edited], axis=0)
            imwrite(immerge(strip, 1, steps + 1), '%s/%s_attr_%s.png' % (save_dir, stem, attr))
# Example #7
def interpolation_on_w(sess, images, real, Z, get_w_from_img, get_img_from_w, step, save_dir):
    """Linearly interpolate between every ordered pair of images in W space.

    Args:
        sess: TensorFlow session used to run the graph.
        images: List of image file paths; all ordered pairs are processed.
        real: Placeholder fed with an NCHW image batch in [-1, 1].
        Z: Placeholder fed with W-space latent codes.
        get_w_from_img: Tensor mapping an image batch to W codes.
        get_img_from_w: Tensor decoding W codes back to images.
        step: Number of interpolation points between the two endpoints.
        save_dir: Directory the interpolation strips are written to.
    """
    # The interpolation weights are identical for every pair; build once
    # instead of once per inner iteration (hoisted loop invariant).
    linspace = np.linspace(0.0, 1.0, step)[:, np.newaxis].astype(np.float32)
    for image1 in tqdm(images):
        # cv2 loads BGR; reverse the channel axis to get RGB.
        img_1 = cv2.imread(image1)[:, :, ::-1][np.newaxis]
        img_1 = adjust_dynamic_range(img_1.astype(np.float32), [0, 255], [-1., 1.])
        img_1_name = image1.split('/')[-1].split('.')[0]
        # img_1's latent does not depend on the inner loop; encode once
        # per outer image instead of once per pair (hoisted sess.run).
        latent_1 = sess.run(get_w_from_img, feed_dict={real: img_1.transpose(0, 3, 1, 2)})
        for image2 in images:
            img_2 = cv2.imread(image2)[:, :, ::-1][np.newaxis]
            img_2 = adjust_dynamic_range(img_2.astype(np.float32), [0, 255], [-1., 1.])
            img_2_name = image2.split('/')[-1].split('.')[0]

            latent_2 = sess.run(get_w_from_img, feed_dict={real: img_2.transpose(0, 3, 1, 2)})

            # Straight line between the two codes in W space.
            mid_res = latent_1 + linspace * (latent_2 - latent_1)

            mid_res = sess.run(get_img_from_w, feed_dict={Z: mid_res})
            mid_res = mid_res.transpose(0, 2, 3, 1)
            mid_res = np.clip(mid_res, -1., 1.)
            # Endpoints flank the interpolated sequence in the strip.
            mid_res = np.concatenate([img_1, mid_res, img_2], axis=0)

            imwrite(immerge(mid_res, 1, step + 2), '%s/%s_%s.png' % (save_dir, img_1_name, img_2_name))
                                        (178 - crop_size) // 2, crop_size,
                                        crop_size)
    img = tf.to_float(
        tf.image.resize_images(
            img, [re_size, re_size],
            method=tf.image.ResizeMethod.BICUBIC)) / 127.5 - 1
    return img


# NOTE(review): this top-level script depends on names presumably defined
# earlier in the original file (utils, batch_size, n_critic, preprocess_fn)
# — confirm against the full source.
sess = utils.session()

# iteration counter
it_cnt, update_cnt = utils.counter()

sess.run(tf.global_variables_initializer())
sess.run(it_cnt)
sess.run(update_cnt)

# Hard-coded local dataset location.
img_paths = glob.glob('/Users/idan.a/data/celeba/*.jpg')

data_pool = utils.DiskImageData(img_paths,
                                batch_size,
                                shape=[218, 178, 3],
                                preprocess_fn=preprocess_fn)
batch_epoch = len(data_pool) // (batch_size * n_critic)
real_ipt = data_pool.batch()
sess.run(it_cnt)
it_epoch = 1
# save_dir="tmp/"
# NOTE(review): scipy.misc.imsave was removed in SciPy 1.2; newer
# environments need imageio.imwrite instead.
scipy.misc.imsave('sss.png', utils.immerge(real_ipt, 10, 10))
# Example #9
        g_summary_opt, _ = sess.run([g_summary, g_step], feed_dict={z: z_ipt})
        summary_writer.add_summary(g_summary_opt, it)

        # display
        if it % 1 == 0:
            print("Epoch: (%3d) (%5d/%5d)" % (epoch, it_epoch, batch_epoch))

        # save
        if (it + 1) % 1000 == 0:
            save_path = saver.save(
                sess, '%s/Epoch_(%d)_(%dof%d).ckpt' %
                (ckpt_dir, epoch, it_epoch, batch_epoch))
            print('Model saved in file: % s' % save_path)

        # sample
        if (it + 1) % 100 == 0:
            f_sample_opt = sess.run(f_sample, feed_dict={z: z_ipt_sample})

            save_dir = './sample_images_while_training/simpsons_vgan'
            utils.mkdir(save_dir + '/')
            utils.imwrite(
                utils.immerge(f_sample_opt.reshape(re_size, re_size), 10,
                              10), '%s/Epoch_(%d)_(%dof%d).jpg' %
                (save_dir, epoch, it_epoch, batch_epoch))

except Exception as e:
    traceback.print_exc()
finally:
    print(" [*] Close main session!")
    sess.close()
# Example #10
def train():
    """Train a WGAN-GP on 64x64 images.

    Builds the generator/discriminator graphs, the Wasserstein losses
    with gradient penalty, and Adam optimizers, then alternates
    ``d_iters`` critic updates per generator update, periodically
    checkpointing and writing sample grids.

    NOTE(review): relies on module-level names (generator, discriminator,
    data_pool, random_dim, batch_size, gp_lambda, learning_rate, beta1,
    beta2, mkdir, load_checkpoint, utils) — confirm against full source.
    """

    with tf.variable_scope('input'):
        real_image = tf.placeholder(tf.float32,
                                    shape=[None, 64, 64, 3],
                                    name='real_image')
        # Noise input is 2-D per sample (random_dim x random_dim), not a
        # flat vector — see the np.random.uniform feeds below.
        random_input = tf.placeholder(tf.float32,
                                      shape=[None, random_dim, random_dim],
                                      name='random_input')

    fake_image = generator(random_input, is_train=True, reuse=False)
    real_result = discriminator(real_image, reuse=False)
    fake_result = discriminator(fake_image, reuse=True)

    # Define loss function
    # Wasserstein critic loss and generator loss.
    d_loss = tf.reduce_mean(fake_result) - tf.reduce_mean(real_result)
    g_loss = -tf.reduce_mean(fake_result)

    # Add gradient penalty
    def gradient_penalty(real, fake, f):
        # Penalize the critic's gradient norm at random interpolates
        # between real and fake samples (WGAN-GP).
        def interpolate(a, b):
            shape = tf.concat(
                (tf.shape(a)[0:1], tf.tile([1], [a.shape.ndims - 1])), axis=0)
            alpha = tf.random_uniform(shape=shape, minval=0., maxval=1.)
            inter = a + alpha * (b - a)
            inter.set_shape(a.get_shape().as_list())
            return inter

        x = interpolate(real, fake)
        pred = f(x)
        gradients = tf.gradients(pred, x)[0]
        # NOTE(review): the norm is reduced over the last axis only; the
        # commented-out line below reduces over all non-batch axes, which
        # is the usual WGAN-GP formulation — confirm which is intended.
        slopes = tf.sqrt(
            tf.reduce_sum(tf.square(gradients), reduction_indices=[-1]))
        #slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=range(1, x.shape.ndims)))
        gp = tf.reduce_mean((slopes - 1.)**2)
        return gp

    gp = gradient_penalty(real_image, fake_image, discriminator)
    d_loss += gp * gp_lambda

    # Add various tf summary variables
    g_summary = tf.summary.scalar('g_loss', g_loss)
    d_summary = tf.summary.scalar('d_loss', d_loss)
    #writer_d = tf.summary.FileWriter('./summaries/logs/plot_d_loss')
    #writer_g = tf.summary.FileWriter('./summaries/logs/plot_g_loss')
    writer = tf.summary.FileWriter('./summaries/cartoon_wgan_gp_4')
    writer.add_graph(tf.get_default_graph())
    #my_summary_op = tf.summary.merge_all()

    # Split trainable variables by scope-name prefix for the two optimizers.
    t_vars = tf.trainable_variables()
    d_vars = [var for var in t_vars if 'discriminator' in var.name]
    g_vars = [var for var in t_vars if 'generator' in var.name]

    # Define the optimizers for WGAN-GP
    #d_optim = tf.train.GradientDescentOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
    #g_optim = tf.train.GradientDescentOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)

    d_optim = tf.train.AdamOptimizer(learning_rate, beta1=beta1,
                                     beta2=beta2).minimize(d_loss,
                                                           var_list=d_vars)
    g_optim = tf.train.AdamOptimizer(learning_rate, beta1=beta1,
                                     beta2=beta2).minimize(g_loss,
                                                           var_list=g_vars)

    #d_optim = tf.train.

    # Separate sampling path with is_train=False (e.g. inference-mode
    # behavior in the generator).
    fake_sample = generator(random_input, is_train=False)

    start_time = time.time()

    sess = tf.Session()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    ckpt_dir = './checkpoints/cartoon_wgan_gp_4'
    mkdir(ckpt_dir + '/')
    load_checkpoint(ckpt_dir, sess)

    print('[*] start training...')

    # Fixed noise reused for every sample grid during training.
    fake_ipt_sample = np.random.uniform(
        -1.0, 1.0, size=[random_dim, random_dim, random_dim])
    #fake_ipt_sample = np.random.normal(size=[random_dim, random_dim, random_dim])

    d_iters = 5
    epoch = 200
    batch_epoch = len(data_pool) // (batch_size * d_iters)
    max_it = epoch * batch_epoch

    for it in range(max_it):
        epoch = it // batch_epoch
        it_epoch = it % batch_epoch + 1

        # Several critic updates per generator update.
        for k in range(d_iters):
            train_image = data_pool.batch()
            # NOTE(review): this rebinds `fake_image` (previously the
            # generator tensor) to numpy noise; the graph already holds
            # the tensor, so training still works, but the name reuse is
            # confusing.
            fake_image = np.random.uniform(
                -1.0, 1.0, size=[batch_size, random_dim, random_dim])
            #fake_image = np.random.normal(size=[batch_size, random_dim, random_dim])
            dLoss, _ = sess.run([d_summary, d_optim],
                                feed_dict={
                                    random_input: fake_image,
                                    real_image: train_image
                                })
        # Only the last critic iteration's summary is logged.
        writer.add_summary(dLoss, it)

        fake_image = np.random.uniform(
            -1.0, 1.0, size=[batch_size, random_dim, random_dim])
        #fake_image = np.random.normal(size=[batch_size, random_dim, random_dim])
        gLoss, _ = sess.run([g_summary, g_optim],
                            feed_dict={random_input: fake_image})
        writer.add_summary(gLoss, it)

        #summaries = sess.run(my_summary_op, feed_dict={random_input: fake_image, real_image: train_image})
        #writer.add_summary(summaries, it)

        # display
        if it % 1 == 0:
            print("Epoch: (%3d) (%5d/%5d)" % (epoch, it_epoch, batch_epoch))
        '''

        if it % 20 == 0:
            #print('train:[%d],d_loss:%f,g_loss:%f' % (it, dLoss, gLoss), 'time: ', time.time() - start_time)
            summaries = sess.run(my_summary_op, feed_dict={random_input: fake_image, real_image: train_image})
            writer.add_summary(summaries, it)
        '''

        # save
        if (it + 1) % 1000 == 0:
            save_path = saver.save(
                sess, '%s/Epoch_(%d)_(%dof%d).ckpt' %
                (ckpt_dir, epoch, it_epoch, batch_epoch))
            print('Model saved in file: % s' % save_path)

        # sample
        if (it + 1) % 100 == 0:
            fake_sample_opt = sess.run(
                fake_sample, feed_dict={random_input: fake_ipt_sample})
            save_dir = './sample_images_while_training/cartoon_wgan_gp_4'
            utils.mkdir(save_dir + '/')
            utils.imwrite(
                utils.immerge(fake_sample_opt, 8,
                              8), '%s/Epoch_(%d)_(%dof%d).jpg' %
                (save_dir, epoch, it_epoch, batch_epoch))
# Example #11
def main():
    """Reconstruct and sample images with a trained BiGAN-style model.

    Restores generator/encoder weights from a checkpoint, then (1) writes
    reconstruction grids alongside the originals for 200 batches of
    training data, and (2) writes 200 grids of images sampled from
    Gaussian noise. All outputs are written to ``args.log_dir``.
    """
    image_size = 128
    z_dim = 256
    with tf.name_scope('input'):
        real = tf.placeholder('float32',
                              [args.batch_size, 3, image_size, image_size],
                              name='real_image')
        z = tf.placeholder('float32', [args.batch_size, z_dim],
                           name='Gaussian')

    G_x = generator_x(input_z=z, reuse=False)
    G_z = generator_z(input_x=real, reuse=False)

    # Encode the real image and decode it again: x -> z -> x.
    Reconstruction = generator_x(generator_z(real, reuse=True), reuse=True)

    saver = tf.train.Saver()
    sess = tensorflow_session()

    if args.restore_path != '':
        print('resotre weights from {}'.format(args.restore_path))
        saver.restore(sess, tf.train.latest_checkpoint(args.restore_path))
        print('Load weights finished!!!')

    print('Getting training Test data...')
    image_batch = get_train_data(sess,
                                 data_dir=args.data_dir,
                                 batch_size=args.batch_size)

    # BUG FIX: the original checked os.path.exists(args.test_dir) but then
    # created args.log_dir, so the output directory (args.log_dir, used by
    # every imwrite below) was only created when an unrelated path was
    # missing. Create the directory that is actually written to.
    os.makedirs(args.log_dir, exist_ok=True)

    ## Reconstruction
    for it in tqdm(range(200)):

        batch_images = sess.run(image_batch)
        batch_images = adjust_dynamic_range(batch_images.astype(np.float32),
                                            [0, 255], [0., 1.])

        recon = sess.run(Reconstruction, feed_dict={real: batch_images})
        # NCHW -> NHWC, then rescale [0, 1] -> [-1, 1] for imwrite.
        recon = adjust_dynamic_range(recon.transpose(0, 2, 3, 1),
                                     drange_in=[0, 1],
                                     drange_out=[-1, 1])
        imwrite(immerge(recon[:25, :, :, :], 5, 5),
                '%s/epoch_%04d_recon.png' % (args.log_dir, it))

        batch_images = adjust_dynamic_range(batch_images.transpose(0, 2, 3, 1),
                                            drange_in=[0, 1],
                                            drange_out=[-1, 1])
        imwrite(immerge(batch_images[:25, :, :, :], 5, 5),
                '%s/epoch_%04d_orin.png' % (args.log_dir, it))

    # Sampling
    for it in tqdm(range(200)):
        latent_z = np.random.randn(args.batch_size, z_dim).astype(np.float32)
        samples1 = sess.run(G_x, feed_dict={z: latent_z})
        samples1 = adjust_dynamic_range(samples1.transpose(0, 2, 3, 1),
                                        drange_in=[0, 1],
                                        drange_out=[-1, 1])
        imwrite(immerge(samples1[:25, :, :, :], 5, 5),
                '%s/epoch_%04d_sampling.png' % (args.log_dir, it))
# Example #12
def train():
    """Train a progressively-grown GAN (WGAN-GP style) at ``target_size``.

    Loads image data from disk, builds the training graph via ``build``,
    optionally resumes from a checkpoint, then alternates critic and
    generator updates, saving samples and a checkpoint every epoch.

    NOTE(review): relies on module-level names (build, get_ipt,
    get_ipt_for_sample, target_size, initial_size, final_size, z_dim,
    utils, sys, math) — confirm against the full source.
    """
    alpha_span = 800000
    batch_size = 32
    ckpt_dir = './checkpoints/wgp'
    n_gen = 1
    n_critic = 1
    it_start = 0
    #epoch = 20*(alpha_span * 2 // (2*4936)) # 4936 is number of images

    def preprocess_fn(img):
        # Resize to the current training resolution and scale to [-1, 1].
        img = tf.image.resize_images(img, [target_size, target_size], method=tf.image.ResizeMethod.AREA) / 127.5 -1
        return img

    def preprocess_fn_dummy(img):
        # Same, but at the final resolution (for the dummy pass below).
        img = tf.image.resize_images(img, [final_size, final_size], method=tf.image.ResizeMethod.AREA) / 127.5 -1
        return img

    # dataset
    img_paths = glob.glob('./imgs/faces/*.png')
    data_pool = utils.DiskImageData(5, img_paths, batch_size//2, shape=[640, 640, 3], preprocess_fn=preprocess_fn)
    data_pool_dummy = utils.DiskImageData(7, img_paths, 1, shape=[640, 640, 3], preprocess_fn=preprocess_fn_dummy)
    batch_epoch = len(data_pool) // (batch_size * 1)#n_critic

    # build graph
    print('Building a graph ...')
    nodes = build(batch_size)
    # session
    sess = utils.session()
    saver = tf.train.Saver()
    # summary
    summary_writer = tf.summary.FileWriter('./summaries/wgp/', sess.graph)
    utils.mkdir(ckpt_dir + '/')

    print('Initializing all variables ...')
    sess.run(tf.global_variables_initializer())

    # run final size session for storing all variables to be used into the optimizer
    print('Running final size dummy session ...')
    #if target_size == initial_size and len(sys.argv) <= 3:
    #    _ = sess.run([nodes['dummy']['d']], feed_dict=get_ipt(2, final_size, 1.0, data_pool_dummy ,z_dim, nodes['dummy']['input'] ))
    #    _ = sess.run([nodes['dummy']['g']], feed_dict=get_ipt(2, final_size, 1.0, data_pool_dummy ,z_dim, nodes['dummy']['input'] ))

    # load checkpoint
    if len(sys.argv)>3 and sys.argv[2]=='resume':
        print ('Loading the checkpoint ...')
        saver.restore(sess, ckpt_dir+'/model.ckpt')
        it_start = 1 + int(sys.argv[3])
    last_saved_iter = it_start - 1

    ''' train '''
    for it in range(it_start, 9999999999):
        # fade alpha
        # Fade-in factor for the newly added resolution block.
        alpha_ipt = it / (alpha_span / batch_size)
        if alpha_ipt > 1 or target_size == initial_size:
            alpha_ipt = 1.0
        print('Alpha : %f' % alpha_ipt)
        # NOTE(review): this unconditionally overrides the fade-in alpha
        # computed above — looks like a debugging leftover; confirm before
        # removing.
        alpha_ipt = 1.0

        # train D
        for i in range(n_critic):
            d_summary_opt, _ = sess.run([nodes['summaries']['d'], nodes['product']['d']],\
                feed_dict=get_ipt(batch_size, target_size, alpha_ipt, data_pool, z_dim, nodes['product']['input']))
        summary_writer.add_summary(d_summary_opt, it)

        # train G
        for i in range(n_gen):
            g_summary_opt, _ = sess.run([nodes['summaries']['g'], nodes['product']['g']],\
                feed_dict=get_ipt(batch_size, target_size, alpha_ipt, data_pool, z_dim, nodes['product']['input']))
        summary_writer.add_summary(g_summary_opt, it)

        # display
        epoch = it // batch_epoch
        it_epoch = it % batch_epoch + 1
        if it % 1 == 0:
            print("iter : %8d, epoch : (%3d) (%5d/%5d) _ resume point : %d" % (it, epoch, it_epoch, batch_epoch,last_saved_iter))

        # sample
        if (it + 1) % batch_epoch == 0:
            f_sample_opt = sess.run(nodes['sample'], feed_dict=get_ipt_for_sample(batch_size, z_dim, nodes['product']['input']))
            f_sample_opt = np.clip(f_sample_opt, -1, 1)
            save_dir = './sample_images_while_training/wgp/'
            utils.mkdir(save_dir + '/')
            # Grid side length large enough to hold the whole batch.
            osz = int(math.sqrt(batch_size))+1
            utils.imwrite(utils.immerge(f_sample_opt, osz, osz), '%s/iter_(%d).png' % (save_dir, it))

        # save
        if (it + 1) % batch_epoch == 0:
            last_saved_iter = it
            save_path = saver.save(sess, '%s/model.ckpt' % (ckpt_dir))
            print('Model saved in file: %s' % save_path)
# Example #13
def main():
    """Train a CycleGAN translating between SOURCE and TARGET domains.

    Builds the two generators (G: source->target, F: target->source),
    their discriminators, and the LSGAN + cycle-consistency losses, then
    alternates generator and discriminator updates over TFRecord data,
    periodically checkpointing and saving sample grids.
    """
    # check params

    # setup params

    PATH_TO_REC = "../data/"
    PATH_TO_CKPT = "./checkpoints"
    SOURCE = "rainy"
    TARGET = "sunny"
    EPOCHS = 100
    BATCH_SIZE = 1
    learning_rate = 0.0002
    beta1 = 0.5
    # Weight of the cycle-consistency term in the generator loss.
    vlambda = 10
    ckpt_dir = os.path.join(PATH_TO_CKPT, SOURCE + "2" + TARGET)

    #
    # Build trainer
    #
    """Build trainer. Generator G maps source image from to target image. Target discriminator aims to
    distinguish between generated target image and real target image. Similar, generator F maps
    target image to source image with respective source discriminator."""

    # real placeholder
    source = tf.placeholder(tf.float32, shape=[None, 256, 256, 3])
    target = tf.placeholder(tf.float32, shape=[None, 256, 256, 3])

    # generated placeholder (fed from the history caches below)
    source2target = tf.placeholder(tf.float32, shape=[None, 256, 256, 3])
    target2source = tf.placeholder(tf.float32, shape=[None, 256, 256, 3])

    # generators (adversarial)
    G = model.generator(source, name="G")
    F = model.generator(target, name="F")

    # generators (cycle consistent); read '_' as 'composed with'
    F_G = model.generator(G, name="F", reuse=True)
    G_F = model.generator(F, name="G", reuse=True)

    # discriminators
    D_Y = model.discriminator(G, name="D_Y")
    D_X = model.discriminator(F, name="D_X")

    D_target = model.discriminator(target, name="D_Y", reuse=True)
    D_source = model.discriminator(source, name="D_X", reuse=True)

    D_source2target = model.discriminator(source2target,
                                          name="D_Y",
                                          reuse=True)
    D_target2source = model.discriminator(target2source,
                                          name="D_X",
                                          reuse=True)

    # loss (discriminators) — least-squares GAN objective
    loss_D_target = tf.reduce_mean(
        tf.squared_difference(D_target, tf.ones_like(D_target)))
    loss_D_source2target = tf.reduce_mean(tf.square(D_source2target))
    D_Y_loss = tf.identity((loss_D_target + loss_D_source2target) / 2.0,
                           name="D_Y_loss")

    loss_D_source = tf.reduce_mean(
        tf.squared_difference(D_source, tf.ones_like(D_source)))
    loss_D_target2source = tf.reduce_mean(tf.square(D_target2source))
    D_X_loss = tf.identity((loss_D_source + loss_D_target2source) / 2.0,
                           name="D_X_loss")

    # loss (generator)
    G_loss_gan = tf.reduce_mean(tf.squared_difference(D_Y, tf.ones_like(D_Y)))
    F_loss_gan = tf.reduce_mean(tf.squared_difference(D_X, tf.ones_like(D_X)))
    cycle_loss = tf.reduce_mean(tf.abs(F_G - source)) + tf.reduce_mean(
        tf.abs(G_F - target))

    generator_loss = tf.identity(G_loss_gan + F_loss_gan +
                                 vlambda * cycle_loss,
                                 name="Gen_loss")

    # get training variables
    trainable_var = tf.trainable_variables()
    D_Y_var = [var for var in trainable_var if "D_Y" in var.name]
    D_X_var = [var for var in trainable_var if "D_X" in var.name]
    generator_var = [
        var for var in trainable_var if "F" in var.name or "G" in var.name
    ]

    # get optimizers
    D_Y_optim = tf.train.AdamOptimizer(learning_rate,
                                       beta1=beta1).minimize(D_Y_loss,
                                                             var_list=D_Y_var)
    D_X_optim = tf.train.AdamOptimizer(learning_rate,
                                       beta1=beta1).minimize(D_X_loss,
                                                             var_list=D_X_var)
    generator_optim = tf.train.AdamOptimizer(
        learning_rate, beta1=beta1).minimize(generator_loss,
                                             var_list=generator_var)
    #
    # Load images
    #

    sess = tf.Session()

    # train images
    source_data = records.RecordProvider(sess,
                                         os.path.join(
                                             PATH_TO_REC,
                                             "train_" + SOURCE + ".tfrecords"),
                                         batch_size=BATCH_SIZE)
    target_data = records.RecordProvider(sess,
                                         os.path.join(
                                             PATH_TO_REC,
                                             "train_" + TARGET + ".tfrecords"),
                                         batch_size=BATCH_SIZE)

    # History caches feeding the discriminators with past generator output.
    cache_source2target = utils.ItemPool()
    cache_target2source = utils.ItemPool()

    # test images
    source_data_test = records.RecordProvider(
        sess,
        os.path.join(PATH_TO_REC, "test_" + SOURCE + ".tfrecords"),
        batch_size=BATCH_SIZE)
    target_data_test = records.RecordProvider(
        sess,
        os.path.join(PATH_TO_REC, "test_" + TARGET + ".tfrecords"),
        batch_size=BATCH_SIZE)

    #
    # Starting training
    #

    sess.run(tf.global_variables_initializer())

    saver = tf.train.Saver(tf.global_variables())
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)

    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("[!] Using variables from %s" % ckpt.model_checkpoint_path)
    else:
        print("[!] Initialized variables")

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    counter = 0
    batch_epoch = min(len(source_data), len(target_data)) // BATCH_SIZE
    max_iter = EPOCHS * batch_epoch
    try:
        #while not coord.should_stop():
        for _ in range(0, max_iter + 1):
            print(0)
            # prepare data
            source_batch = source_data.feed()
            target_batch = target_data.feed()
            print(1)
            generated_target, generated_source = sess.run([G, F],
                                                          feed_dict={
                                                              source:
                                                              source_batch,
                                                              target:
                                                              target_batch
                                                          })
            print(2)
            source2target_batch = np.array(
                cache_source2target(list(generated_target)))
            target2source_batch = np.array(
                cache_target2source(list(generated_source)))
            # BUG FIX: removed a second sess.run([G, F]) with no feed_dict
            # here — it would fail on the unfed placeholders and its
            # result was immediately discarded anyway.
            print(3)
            # train generator
            _ = sess.run(generator_optim,
                         feed_dict={
                             source: source_batch,
                             target: target_batch
                         })

            # train D_Y
            _ = sess.run(D_Y_optim,
                         feed_dict={
                             target: target_batch,
                             source2target: source2target_batch
                         })

            # train D_X
            _ = sess.run(D_X_optim,
                         feed_dict={
                             source: source_batch,
                             target2source: target2source_batch
                         })
            print(4)
            # print and save
            counter += 1
            if counter % 1000 == 0:
                print("[*] Iterations passed: %s" % counter)
                save_path = saver.save(
                    sess, os.path.join(ckpt_dir,
                                       "{:015}.ckpt".format(counter)))
                print("[*] Model saved in %s" % save_path)

            # sample test images
            if counter % 100 == 0:
                source_batch = source_data_test.feed()
                target_batch = target_data_test.feed()
                [s2t, s2t2s, t2s, t2s2t] = sess.run([G, F_G, F, G_F],
                                                    feed_dict={
                                                        source: source_batch,
                                                        target: target_batch
                                                    })
                # BUG FIX: the original concatenated `target` (the tf
                # placeholder tensor) into the numpy array instead of
                # `target_batch`, which would raise at runtime.
                sample = np.concatenate(
                    (source_batch, s2t, s2t2s, target_batch, t2s, t2s2t),
                    axis=0)

                save_dir = "../sample_while_training/"
                # BUG FIX: dropped a stray literal "%s" that the original
                # left unformatted in the file name.
                save_file = save_dir + SOURCE + "2" + TARGET + "{:015}.jpg".format(
                    counter)
                try:
                    utils.imwrite(utils.immerge(sample, 2, 3), save_file)
                except Exception:
                    # Best-effort: sampling failures must not kill training.
                    print("[!] Failed to save sample image to %s" % save_file)
                    # for the sake of laziness...
                    pass
            print("Passed round %s" % counter)
            if counter > max_iter:
                print("[!] Reached %s epochs" % EPOCHS)
                coord.request_stop()

    except Exception as e:
        coord.request_stop(e)
    finally:
        print("Finished.")
        coord.request_stop()
        coord.join(threads)
        sess.close()
# Example #14
def main():
    """Train an ALI/BiGAN-style model: a generator G_x (z -> image), an
    encoder G_z (image -> z), and a joint discriminator over (image, z)
    pairs.

    Reads all hyper-parameters from the module-level ``args`` namespace
    (batch_size, image_size, z_dim, beta1, beta2, learning_rate,
    restore_path, data_dir_train, data_dir_test, log_dir, checkpoint_dir).

    Side effects: creates log/checkpoint directories, writes sampling /
    reconstruction / original image grids to ``args.log_dir`` and saves
    checkpoints to ``args.checkpoint_dir``.
    """

    # Graph inputs: images are NCHW float32, z is a Gaussian latent code.
    with tf.name_scope('input'):
        real = tf.placeholder(
            'float32', [args.batch_size, 3, args.image_size, args.image_size],
            name='real_image')
        z = tf.placeholder('float32', [args.batch_size, args.z_dim],
                           name='Gaussian')
        # Learning rates are fed per step so G and D can use different rates.
        lr_g = tf.placeholder(tf.float32, None, name='learning_rate_g')
        lr_d = tf.placeholder(tf.float32, None, name='learning_rate_d')

    G_x = generator_x(input_z=z, reuse=False)  # decode z -> fake image
    G_z = generator_z(input_x=real, reuse=False)  # encode real image -> latent

    # Joint discriminator scores (image, latent) pairs: the fake pair uses
    # the sampled z, the real pair uses the encoded latent.
    dis_fake, dis_fake_logit = discriminator(input_x=G_x,
                                             input_z=z,
                                             reuse=False)
    dis_real, dis_real_logit = discriminator(input_x=real,
                                             input_z=G_z,
                                             reuse=True)

    # Auto-encoding path, used only for qualitative reconstruction samples.
    Reconstruction = generator_x(generator_z(real, reuse=True), reuse=True)

    with tf.variable_scope('generator_loss'):
        # Non-saturating loss for both mapping directions: push fake pairs
        # towards the "real" label and real pairs towards the "fake" label.
        G_loss_img = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=dis_fake_logit, labels=tf.ones_like(dis_fake_logit)))
        G_loss_z = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=dis_real_logit, labels=tf.zeros_like(dis_real_logit)))
        G_loss = G_loss_img + G_loss_z

    with tf.variable_scope('discriminator_loss'):
        # Standard sigmoid cross-entropy discriminator objective.
        D_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=dis_real_logit, labels=tf.ones_like(dis_real_logit)))
        D_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=dis_fake_logit, labels=tf.zeros_like(dis_fake_logit)))
        D_loss = D_loss_real + D_loss_fake

    # Partition variables by name prefix so each optimizer updates only its
    # own sub-network.
    Genx_vars = [
        v for v in tf.global_variables() if v.name.startswith("generator_x")
    ]
    Genz_vars = [
        v for v in tf.global_variables() if v.name.startswith("generator_z")
    ]
    Dis_vars = [
        v for v in tf.global_variables() if v.name.startswith("discriminator")
    ]

    # Generator and encoder are trained jointly against the discriminator.
    G_solver = tf.train.AdamOptimizer(lr_g, args.beta1, args.beta2).minimize(
        G_loss, var_list=Genx_vars + Genz_vars)
    D_solver = tf.train.AdamOptimizer(lr_d, args.beta1,
                                      args.beta2).minimize(D_loss,
                                                           var_list=Dis_vars)

    saver = tf.train.Saver(max_to_keep=5)
    sess = tensorflow_session()

    # Resume from the latest checkpoint if a restore path was given,
    # otherwise start from freshly initialized weights.
    if args.restore_path != '':
        print('resotre weights from {}'.format(args.restore_path))
        saver.restore(sess, tf.train.latest_checkpoint(args.restore_path))
        print('Load weights finished!!!')
    else:
        sess.run(tf.global_variables_initializer())

    print('Getting training HQ data...')
    image_batch_train = get_train_data(sess,
                                       data_dir=args.data_dir_train,
                                       batch_size=args.batch_size)
    image_batch_test = get_train_data(sess,
                                      data_dir=args.data_dir_test,
                                      batch_size=args.batch_size)

    if not os.path.exists(args.log_dir):
        os.mkdir(args.log_dir)

    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)

    # Main training loop: one D step followed by one G step per iteration.
    for it in range(120000):

        latent_z = np.random.randn(args.batch_size,
                                   args.z_dim).astype(np.float32)
        batch_images_train = sess.run(image_batch_train)
        # Rescale raw [0, 255] pixel values to [0, 1] floats for the nets.
        batch_images_train = adjust_dynamic_range(
            batch_images_train.astype(np.float32), [0, 255], [0., 1.])

        # NOTE(review): G runs at 10x the base learning rate and D at 0.1x —
        # presumably a deliberate balancing trick; confirm before changing.
        feed_dict_1 = {
            real: batch_images_train,
            z: latent_z,
            lr_g: args.learning_rate * 10,
            lr_d: args.learning_rate * 0.1
        }
        _, d_loss_real, d_loss_fake = sess.run(
            [D_solver, D_loss_real, D_loss_fake], feed_dict=feed_dict_1)
        _, g_loss_img, g_loss_z = sess.run([G_solver, G_loss_img, G_loss_z],
                                           feed_dict=feed_dict_1)

        # Log every 50 iterations; dump image grids every 1000 (1000 is a
        # multiple of 50, so the nested check is reachable).
        if it % 50 == 0:
            print(
                'Iter: {}  g_loss_img: {} g_loss_z: {} d_r_loss: {} d_f_loss_: {}'
                .format(it, g_loss_img, g_loss_z, d_loss_real, d_loss_fake))

            if it % 1000 == 0:
                # Samples from the generator: NCHW -> NHWC, then remap
                # [0, 1] -> [-1, 1] before writing the 6x6 grid.
                samples1 = sess.run(G_x, feed_dict={z: latent_z})
                samples1 = adjust_dynamic_range(samples1.transpose(0, 2, 3, 1),
                                                drange_in=[0, 1],
                                                drange_out=[-1, 1])
                imwrite(immerge(samples1[:36, :, :, :], 6, 6),
                        '%s/epoch_%d_sampling.png' % (args.log_dir, it))

                batch_images_test = sess.run(image_batch_test)
                batch_images_test = adjust_dynamic_range(
                    batch_images_test.astype(np.float32), [0, 255], [0., 1.])

                # Reconstructions of held-out images via encoder + decoder.
                recon = sess.run(Reconstruction,
                                 feed_dict={real: batch_images_test})
                recon = adjust_dynamic_range(recon.transpose(0, 2, 3, 1),
                                             drange_in=[0, 1],
                                             drange_out=[-1, 1])
                imwrite(immerge(recon[:36, :, :, :], 6, 6),
                        '%s/epoch_%d_recon.png' % (args.log_dir, it))

                # The matching originals, saved for visual comparison.
                batch_images = adjust_dynamic_range(
                    batch_images_test.transpose(0, 2, 3, 1),
                    drange_in=[0, 1],
                    drange_out=[-1, 1])
                imwrite(immerge(batch_images[:36, :, :, :], 6, 6),
                        '%s/epoch_%d_orin.png' % (args.log_dir, it))

        # Checkpoint every 10k iterations once past the 50k warm-up.
        if np.mod(it, 10000) == 0 and it > 50000:
            saver.save(sess, args.checkpoint_dir, global_step=it)
Пример #15
0
def sample(sess, Z, generator, save_dir, total_nums=3):
    """Draw random latent codes, run the generator, and save sample grids.

    Args:
        sess: active TensorFlow session.
        Z: latent-code placeholder fed with N(0, 1) samples.
        generator: generator output tensor (NCHW layout) to evaluate.
        save_dir: directory where the PNG grids are written.
        total_nums: number of sample grids to produce. Defaults to 3,
            matching the previously hard-coded count.

    Relies on the module-level ``args`` for ``batch_size`` and ``z_dim``.
    """
    for it in tqdm(range(total_nums)):
        # Over-sample (2x batch) and keep the first 16 images for a 2x8 grid.
        samples1 = sess.run(
            generator,
            feed_dict={Z: np.random.randn(args.batch_size * 2, args.z_dim)})
        samples1 = samples1.transpose(0, 2, 3, 1)  # NCHW -> NHWC
        # Clamp to the displayable dynamic range before merging.
        samples1 = np.clip(samples1, -1., 1.)
        imwrite(immerge(samples1[:16, :, :, :], 2, 8),
                '%s/iter_%06d_sampling.png' % (save_dir, it))
def main(epoch, batch_size, lr, z_dim, bottle_dim, i_c, alpha, n_critic,
         gpu_id, data_pool):
    """Train a Variational Discriminator Bottleneck (VDB) WGAN-GP on MNIST.

    Args:
        epoch: number of training epochs.
        batch_size: images per batch.
        lr: RMSProp learning rate for both networks.
        z_dim: dimensionality of the generator's noise input.
        bottle_dim: size of the discriminator's stochastic bottleneck layer.
        i_c: information constraint (upper bound) on the bottleneck loss.
        alpha: step size of the dual (beta) update.
        n_critic: discriminator updates per generator update.
        gpu_id: index of the GPU the graph is placed on.
        data_pool: data source supporting ``len()`` and ``batch('img')``.

    Side effects: writes TensorBoard summaries, checkpoints, and sample
    image tiles under ./summaries, ./checkpoints and
    ./sample_images_while_training.
    """

    with tf.device('/gpu:%d' % gpu_id):  # place all ops on the chosen GPU

        generator = models.generator  # generator network function
        discriminator = models.discriminator_wgan_gp  # discriminator network function

        # Input placeholders: 28x28x1 MNIST images and Gaussian noise.
        real = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
        z = tf.placeholder(tf.float32, shape=[None, z_dim])

        # Generate fake data with the generator.
        fake = generator(z, reuse=False)

        # Discriminator scores plus bottleneck-layer means and stds for
        # both real and fake data.
        r_logit, r_mus, r_sigmas = discriminator(real,
                                                 reuse=False,
                                                 gen_train=False,
                                                 bottleneck_dim=bottle_dim)
        f_logit, f_mus, f_sigmas = discriminator(fake,
                                                 gen_train=False,
                                                 bottleneck_dim=bottle_dim)

        # Wasserstein loss and gradient penalty used to train the
        # discriminator.
        wasserstein_d = losses.wgan_loss(r_logit, f_logit)
        gp = losses.gradient_penalty(real, fake, discriminator)

        # Bottleneck (information) loss computed from the bottleneck-layer
        # mus/sigmas of real and fake data; i_c is the information
        # constraint (upper bound) — an important hyper-parameter.
        bottleneck_loss=losses._bottleneck_loss(real_mus=r_mus, fake_mus=f_mus,\
            real_sigmas=r_sigmas,fake_sigmas=f_sigmas,i_c=i_c)

        # Lagrange multiplier for the constraint, updated adaptively by dual
        # gradient descent (see the VDB paper, Eq. 9). Initialized to zero.
        # Background on dual gradient descent:
        # https://medium.com/@jonathan_hui/rl-dual-gradient-descent-fac524c1f049

        beta = tf.Variable(tf.zeros([]), name="beta")

        # Combined discriminator loss; the factor 10 on the gradient penalty
        # is the default from the WGAN-GP paper
        # (https://arxiv.org/pdf/1704.00028.pdf).
        d_loss = -wasserstein_d + gp * 10.0 + beta * bottleneck_loss

        # Dual ascent step on beta, clipped at zero so the multiplier stays
        # non-negative.
        beta_new = tf.maximum(0.0, beta + alpha * bottleneck_loss)

        # Unlike the PyTorch version, TensorFlow's static graph needs an
        # explicit assign op to propagate the beta update.
        assign_op = tf.assign(beta, beta_new)  #beta.assign(beta_new)

        # Generator loss: plain WGAN objective — maximize the mean score of
        # generated samples (gen_train=True path of the discriminator).
        f_logit_gen, f_mus_gen, f_sigmas_gen = discriminator(
            fake, gen_train=True, bottleneck_dim=bottle_dim)
        g_loss = -tf.reduce_mean(f_logit_gen)

        # Separate optimizers over the generator and discriminator variables.
        d_var = tf.trainable_variables('discriminator')
        g_var = tf.trainable_variables('generator')

        d_step = tf.train.RMSPropOptimizer(learning_rate=lr).minimize(
            d_loss, var_list=d_var)
        g_step = tf.train.RMSPropOptimizer(learning_rate=lr).minimize(
            g_loss, var_list=g_var)

    # TensorBoard summaries for the losses.
    wd = wasserstein_d
    d_summary = utils.summary({wd: 'wd', gp: 'gp'})
    g_summary = utils.summary({g_loss: 'g_loss'})
    beta_summary = utils.b_summary(beta)
    #beta_summary = utils.summary({beta: 'beta'})

    #sess= tf.Session()

    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=False)
    with tf.Session(config=config) as sess:
        # iteration counter
        it_cnt, update_cnt = utils.counter()
        # saver keeps both generator and discriminator parameters
        saver = tf.train.Saver(
            max_to_keep=5
        )  # used to save both generator and discriminator parameters
        # summary writer
        summary_writer = tf.summary.FileWriter('./summaries/mnist_wgan_gp',
                                               sess.graph)
        ''' Checking for previuosly trained checkpints'''
        ckpt_dir = './checkpoints/mnist_wgan_gp'
        utils.mkdir(ckpt_dir + '/')
        if not utils.load_checkpoint(ckpt_dir, sess):
            sess.run(tf.global_variables_initializer())

        # Training loop: resumes from the persisted iteration counter.
        batch_epoch = len(data_pool) // (batch_size * n_critic)
        max_it = epoch * batch_epoch
        for it in range(sess.run(it_cnt), max_it):
            sess.run(update_cnt)

            # which epoch
            epoch = it // batch_epoch
            it_epoch = it % batch_epoch + 1

            # train D
            for i in range(
                    n_critic
            ):  # first train the discriminator for n_critic iterations
                # batch data
                real_ipt = data_pool.batch('img')  # read one data batch
                z_ipt = np.random.normal(size=[batch_size,
                                               z_dim])  # sample noise input

                d_summary_opt, _ = sess.run([d_summary, d_step],
                                            feed_dict={
                                                real: real_ipt,
                                                z: z_ipt
                                            })  # discriminator gradient update
                beta_summary_opt = sess.run(beta_summary)
                #_ = sess.run([d_step], feed_dict={real: real_ipt, z: z_ipt})
                sess.run([assign_op], feed_dict={
                    real: real_ipt,
                    z: z_ipt
                })  # adaptively update the beta parameter

            summary_writer.add_summary(d_summary_opt, it)
            summary_writer.add_summary(beta_summary_opt, it)

            # train the generator (plain WGAN generator objective)
            z_ipt = np.random.normal(size=[batch_size, z_dim])
            g_summary_opt, _ = sess.run([g_summary, g_step],
                                        feed_dict={z: z_ipt})
            #_ = sess.run([g_step], feed_dict={z: z_ipt})
            summary_writer.add_summary(g_summary_opt, it)

            # display training progress
            if it % 100 == 0:
                print("Epoch: (%3d) (%5d/%5d)" %
                      (epoch, it_epoch, batch_epoch))

            # save a checkpoint every 1000 iterations
            if (it + 1) % 1000 == 0:
                save_path = saver.save(
                    sess, '%s/Epoch_(%d)_(%dof%d).ckpt' %
                    (ckpt_dir, epoch, it_epoch, batch_epoch))
                print('Model saved in file: % s' % save_path)

            # save generated images as a 10x10 tile every 100 iterations
            if (it + 1) % 100 == 0:
                z_input_sample = np.random.normal(size=[100,
                                                        z_dim])  # noise samples
                # NOTE(review): generator(z) is re-invoked on every sampling
                # iteration; presumably models.generator reuses existing
                # variables — confirm, otherwise this should be hoisted out
                # of the loop.
                f_sample = generator(z)
                f_sample_opt = sess.run(f_sample,
                                        feed_dict={z: z_input_sample})

                save_dir = './sample_images_while_training/mnist_wgan_gp'
                utils.mkdir(save_dir + '/')
                utils.imwrite(
                    utils.immerge(f_sample_opt, 10,
                                  10), '%s/Epoch_(%d)_(%dof%d).jpg' %
                    (save_dir, epoch, it_epoch, batch_epoch))
Пример #17
0
            save_path = saver.save(
                sess, '%s/Epoch_(%d)_(%dof%d).ckpt' %
                (ckpt_dir, epoch, it_epoch, batch_epoch))
            print('Model saved in file: % s' % save_path)

        # sample
        if (it + 1) % 100 == 0:
            img_sample_opt = sess.run(img_sample,
                                      feed_dict={z_sample: z_ipt_sample})
            img_rec_sample_opt = sess.run(img_rec_sample,
                                          feed_dict={img: img_ipt_sample})

            save_dir = './sample_images_while_training/mnist_generate'
            utils.mkdir(save_dir + '/')
            utils.imwrite(
                utils.immerge(img_sample_opt, 10,
                              10), '%s/Epoch_(%d)_(%dof%d).jpg' %
                (save_dir, epoch, it_epoch, batch_epoch))

            save_dir = './sample_images_while_training/mnist_reconstruct'
            utils.mkdir(save_dir + '/')
            n_grid = int(np.ceil(batch_size**0.5))
            img_ori = utils.immerge(img_ipt_sample, n_grid, n_grid)
            img_rec = utils.immerge(img_rec_sample_opt, n_grid, n_grid)
            img_mer = np.concatenate((img_ori, img_rec), 1)
            utils.imwrite(
                img_mer, '%s/Epoch_(%d)_(%dof%d).jpg' %
                (save_dir, epoch, it_epoch, batch_epoch))

except Exception, e:
    traceback.print_exc()
finally:
Пример #18
0
    
            # which epoch
            epoch = it // batch_epoch
            it_epoch = it % batch_epoch + 1
            it_batch = it + (target_num*15)
            
            ch_input_ipt_start += ch_input_ipt_sample_delta
            ch_input_ipt_start = standardization(ch_input_ipt_start)*4
    
    
            # display
            if it % 1 == 0:
                print("Epoch: (%3d) (%5d/%5d)" % (epoch, it_epoch, it_batch))
    
    
            # sample
            if (it + 1) % 1 == 0:
                f_sample_opt = sess.run(f_sample, feed_dict={z: z_ipt_sample, ch_input: ch_input_ipt_start, ch_mask:ch_mask_ipt_sample})
    
                save_dir = './sample_images_while_training/' + dir_name
                utils.mkdir(save_dir + '/')
                concat_img = utils.immerge(f_sample_opt, 8, 8)
                image_list.append(concat_img)
                utils.imwrite(concat_img, '%s/%03d.png' % (save_dir, it_batch))

except Exception:
    traceback.print_exc()
finally:
    print(" [*] Close main session!")
    sess.close()
Пример #19
0
def training_loop(
        submit_config,
        Encoder_args={},
        E_opt_args={},
        D_opt_args={},
        E_loss_args={},
        D_loss_args={},
        lr_args=EasyDict(),
        tf_config={},
        dataset_args=EasyDict(),
        decoder_pkl=EasyDict(),
        drange_data=[0, 255],
        drange_net=[
            -1, 1
        ],  # Dynamic range used when feeding image data to the networks.
        mirror_augment=False,
        resume_run_id=None,  # Run ID or network pkl to resume training from, None = start from scratch.
        resume_snapshot=None,  # Snapshot index to resume training from, None = autodetect.
        image_snapshot_ticks=1,  # How often to export image snapshots?
        network_snapshot_ticks=10,  # How often to export network snapshots?
        save_tf_graph=False,  # Include full TensorFlow computation graph in the tfevents file?
        save_weight_histograms=False,  # Include weight histograms in the tfevents file?
        max_iters=150000,
        E_smoothing=0.999):
    """Multi-GPU training loop for an encoder E against a fixed decoder Gs.

    Builds per-GPU loss graphs for the encoder (perceptual reconstruction +
    adversarial term) and the discriminator, trains them with tflib
    Optimizers under an exponentially decaying learning rate, periodically
    exports reconstruction image grids, and saves network snapshots as
    pickles in ``submit_config.run_dir``.

    NOTE(review): several parameters use mutable defaults ({} / EasyDict());
    they appear to be read-only config dicts, but callers should not rely
    on mutating them.
    """

    tflib.init_tf(tf_config)

    with tf.name_scope('input'):
        # NCHW image placeholders for training and evaluation batches.
        real_train = tf.placeholder(tf.float32, [
            submit_config.batch_size, 3, submit_config.image_size,
            submit_config.image_size
        ],
                                    name='real_image_train')
        real_test = tf.placeholder(tf.float32, [
            submit_config.batch_size_test, 3, submit_config.image_size,
            submit_config.image_size
        ],
                                   name='real_image_test')
        # Split the training batch evenly across the configured GPUs.
        real_split = tf.split(real_train,
                              num_or_size_splits=submit_config.num_gpus,
                              axis=0)

    with tf.device('/gpu:0'):
        if resume_run_id is not None:
            # Resume: restore all networks and recover the starting
            # iteration index from the snapshot filename.
            network_pkl = misc.locate_network_pkl(resume_run_id,
                                                  resume_snapshot)
            print('Loading networks from "%s"...' % network_pkl)
            E, G, D, Gs, NE = misc.load_pkl(network_pkl)
            start = int(network_pkl.split('-')[-1].split('.')
                        [0]) // submit_config.batch_size
        else:
            # Fresh run: load the pre-trained decoder networks and build a
            # new encoder from scratch.
            print('Constructing networks...')
            G, D, Gs, NE = misc.load_pkl(decoder_pkl.decoder_pkl)
            E = tflib.Network('E',
                              size=submit_config.image_size,
                              filter=64,
                              filter_max=1024,
                              phase=True,
                              **Encoder_args)
            start = 0

    Gs.print_layers()
    E.print_layers()
    D.print_layers()

    # A shared global step drives exponential learning-rate decay for both
    # optimizers; it is incremented once per training step via add_global.
    global_step = tf.Variable(start,
                              trainable=False,
                              name='learning_rate_step')
    learning_rate = tf.train.exponential_decay(lr_args.learning_rate,
                                               global_step,
                                               lr_args.decay_step,
                                               lr_args.decay_rate,
                                               staircase=lr_args.stair)
    add_global = global_step.assign_add(1)
    E_opt = tflib.Optimizer(name='TrainE',
                            learning_rate=learning_rate,
                            **E_opt_args)
    D_opt = tflib.Optimizer(name='TrainD',
                            learning_rate=learning_rate,
                            **D_opt_args)

    # Build the loss graph once per GPU; the scalar losses are accumulated
    # here and averaged over GPUs below for reporting.
    E_loss_rec = 0.
    E_loss_adv = 0.
    D_loss_real = 0.
    D_loss_fake = 0.
    D_loss_grad = 0.
    for gpu in range(submit_config.num_gpus):
        print('build graph on gpu %s' % str(gpu))
        with tf.name_scope('GPU%d' % gpu), tf.device('/gpu:%d' % gpu):
            # GPU 0 uses the master networks; the other GPUs use clones.
            E_gpu = E if gpu == 0 else E.clone(E.name + '_shadow')
            D_gpu = D if gpu == 0 else D.clone(D.name + '_shadow')
            G_gpu = Gs if gpu == 0 else Gs.clone(Gs.name + '_shadow')
            perceptual_model = PerceptualModel(
                img_size=[submit_config.image_size, submit_config.image_size],
                multi_layers=False)
            real_gpu = process_reals(real_split[gpu], mirror_augment,
                                     drange_data, drange_net)
            with tf.name_scope('E_loss'), tf.control_dependencies(None):
                # Encoder loss = reconstruction (perceptual) + adversarial,
                # resolved dynamically from E_loss_args.
                E_loss, recon_loss, adv_loss = dnnlib.util.call_func_by_name(
                    E=E_gpu,
                    G=G_gpu,
                    D=D_gpu,
                    perceptual_model=perceptual_model,
                    reals=real_gpu,
                    **E_loss_args)
                E_loss_rec += recon_loss
                E_loss_adv += adv_loss
            with tf.name_scope('D_loss'), tf.control_dependencies(None):
                D_loss, loss_fake, loss_real, loss_gp = dnnlib.util.call_func_by_name(
                    E=E_gpu, G=G_gpu, D=D_gpu, reals=real_gpu, **D_loss_args)
                D_loss_real += loss_real
                D_loss_fake += loss_fake
                D_loss_grad += loss_gp
            with tf.control_dependencies([add_global]):
                # Registering gradients under this dependency ticks the
                # global step (and thus LR decay) whenever a train op runs.
                E_opt.register_gradients(E_loss, E_gpu.trainables)
                D_opt.register_gradients(D_loss, D_gpu.trainables)

    # Average the accumulated per-GPU losses for logging.
    E_loss_rec /= submit_config.num_gpus
    E_loss_adv /= submit_config.num_gpus
    D_loss_real /= submit_config.num_gpus
    D_loss_fake /= submit_config.num_gpus
    D_loss_grad /= submit_config.num_gpus

    E_train_op = E_opt.apply_updates()
    D_train_op = D_opt.apply_updates()

    #Es_update_op = Es.setup_as_moving_average_of(E, beta=E_smoothing)

    print('building testing graph...')
    fake_X_val = test(E, Gs, real_test, submit_config)

    sess = tf.get_default_session()

    print('Getting training data...')
    image_batch_train = get_train_data(sess,
                                       data_dir=dataset_args.data_train,
                                       submit_config=submit_config,
                                       mode='train')
    image_batch_test = get_train_data(sess,
                                      data_dir=dataset_args.data_test,
                                      submit_config=submit_config,
                                      mode='test')

    summary_log = tf.summary.FileWriter(submit_config.run_dir)
    if save_tf_graph:
        summary_log.add_graph(tf.get_default_graph())
    if save_weight_histograms:
        E.setup_weight_histograms()
        D.setup_weight_histograms()

    # Progress bookkeeping: cur_nimg counts images seen, ticks gate the
    # snapshot schedule.
    cur_nimg = start * submit_config.batch_size
    cur_tick = 0
    tick_start_nimg = cur_nimg
    start_time = time.time()

    print('Optimization starts!!!')
    for it in range(start, max_iters):

        # One encoder step, then one discriminator step, on the same batch.
        feed_dict = {real_train: sess.run(image_batch_train)}
        sess.run([E_train_op, E_loss_rec, E_loss_adv], feed_dict)
        sess.run([D_train_op, D_loss_real, D_loss_fake, D_loss_grad],
                 feed_dict)

        cur_nimg += submit_config.batch_size

        if it % 100 == 0:
            print("Iter: %06d  kimg: %-8.1f time: %-12s" %
                  (it, cur_nimg / 1000,
                   dnnlib.util.format_time(time.time() - start_time)))
            sys.stdout.flush()
            tflib.autosummary.save_summaries(summary_log, it)

        # A "tick" elapses every 65k images; snapshots follow tick counts.
        if cur_nimg >= tick_start_nimg + 65000:
            cur_tick += 1
            tick_start_nimg = cur_nimg

            if cur_tick % image_snapshot_ticks == 0:
                # Export a grid of test images (top row) and their encoder
                # reconstructions (bottom row).
                batch_images_test = sess.run(image_batch_test)
                batch_images_test = misc.adjust_dynamic_range(
                    batch_images_test.astype(np.float32), [0, 255], [-1., 1.])

                samples2 = sess.run(fake_X_val,
                                    feed_dict={real_test: batch_images_test})
                samples2 = samples2.transpose(0, 2, 3, 1)  # NCHW -> NHWC
                batch_images_test = batch_images_test.transpose(0, 2, 3, 1)
                orin_recon = np.concatenate([batch_images_test, samples2],
                                            axis=0)
                imwrite(immerge(orin_recon, 2, submit_config.batch_size_test),
                        '%s/iter_%08d.png' % (submit_config.run_dir, cur_nimg))

            if cur_tick % network_snapshot_ticks == 0:
                pkl = os.path.join(submit_config.run_dir,
                                   'network-snapshot-%08d.pkl' % (cur_nimg))
                misc.save_pkl((E, G, D, Gs, NE), pkl)

    # Final snapshot after the loop completes.
    misc.save_pkl((E, G, D, Gs, NE),
                  os.path.join(submit_config.run_dir, 'network-final.pkl'))
    summary_log.close()
Пример #20
0
        if it % 1 == 0:
            cost_time = time() - str_time
            print("Epoch:%4d  Iteration:%5d   Batch:%3d/%3d   Time:%.3f" %
                  (epoch, it, it_epoch, batch_epoch, cost_time))

        # save
        if (it + 1) % save_per_it == 0 or it == max_it - 1:
            save_name = "Epoch_" + str(epoch).zfill(4) + "_It_" + str(
                it).zfill(5)
            save_path = saver.save(sess, ckpt_path + "/" + save_name)
            print('Checkpoint saved in: % s' % save_path)

        # sample
        if (it + 1) % output_sample_per_it == 0 or it == 0 or it == max_it - 1:
            f_sample_opt = sess.run(f_sample, feed_dict={z: z_ipt_sample})

            # sample_name = "Epoch_" + str(epoch).zfill(4) + "_It_" + str(it).zfill(5) + "_" + str(it_epoch) + "of" + str(batch_epoch)
            sample_name = "Epoch_" + str(epoch).zfill(4) + "_It_" + str(
                it).zfill(5)

            utils.imwrite(
                utils.immerge(f_sample_opt, output_num_sqrt, output_num_sqrt),
                sample_path + "/" + sample_name + ".jpg")
            print('Sample saved in: % s' % sample_path)

except Exception as e:
    traceback.print_exc()
finally:
    print(" [*] Close main session!")
    sess.close()
Пример #21
0
            d_summary_opt, _ = sess.run([d_summary, d_step], feed_dict={real: real_ipt, z: z_ipt})
        summary_writer.add_summary(d_summary_opt, it)

        # train G
        z_ipt = np.random.normal(size=[batch_size, z_dim])
        g_summary_opt, _ = sess.run([g_summary, g_step], feed_dict={z: z_ipt})
        summary_writer.add_summary(g_summary_opt, it)

        # display
        if it % 100 == 0:
            print("Epoch: (%3d) (%5d/%5d)" % (epoch, it_epoch, batch_epoch))

        # save
        if (it + 1) % 1000 == 0:
            save_path = saver.save(sess, '%s/Epoch_(%d)_(%dof%d).ckpt' % (ckpt_dir, epoch, it_epoch, batch_epoch))
            print('Model saved in file: % s' % save_path)

        # sample
        if (it + 1) % 100 == 0:
            f_sample_opt = sess.run(f_sample, feed_dict={z: z_ipt_sample})

            save_dir = './sample_images_while_training/celeba_wgan_gp'
            utils.mkdir(save_dir + '/')
            utils.imwrite(utils.immerge(f_sample_opt, 10, 10), '%s/Epoch_(%d)_(%dof%d).jpg' % (save_dir, epoch, it_epoch, batch_epoch))

except Exception, e:
    traceback.print_exc()
finally:
    print(" [*] Close main session!")
    sess.close()
Пример #22
0
def main():
    """Interpolate between pairs of test images in latent space.

    Loads an encoder E, a generator Gs, and a latent mapper NE from
    ``args.restore_path``, then for every ordered pair of images in
    ``args.data_dir_test`` linearly interpolates their latent codes and
    writes the decoded sequence as a single image strip
    (source | interpolations | target).

    Modes (``args.mode``):
        0 -- interpolate in z space (encoder output mapped through NE).
        1 -- interpolate in w space (encoder output, tiled per layer).

    Raises:
        ValueError: if ``args.mode`` is neither 0 nor 1.
    """
    args = parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    tf_config = {'rnd.np_random_seed': 1000}
    tflib.init_tf(tf_config)
    assert os.path.exists(args.restore_path)
    E, _, _, Gs, NE = load_pkl(args.restore_path)
    # Synthesis input shape is (batch, num_layers, latent_dim).
    num_layers, latent_dim = Gs.components.synthesis.input_shape[1:3]

    # Building graph
    # Interpolation on z
    real = tf.placeholder('float32',
                          [None, 3, args.image_size, args.image_size],
                          name='real_image')
    Z = tf.placeholder('float32', [None, latent_dim], name='Gaussian')
    latent_w = E.get_output_for(real, phase=False)  # image -> w code
    latent_z = NE.get_output_for(latent_w, None)  # w -> z via the mapper NE
    reconstruction_from_z = Gs.get_output_for(Z,
                                              None,
                                              randomize_noise=False,
                                              is_validation=True)
    # Interpolation on w: broadcast one w vector to every synthesis layer.
    W = tf.tile(Z[:, np.newaxis], [1, num_layers, 1])
    reconstruction_from_w = Gs.components.synthesis.get_output_for(
        W, randomize_noise=False)
    sess = tf.get_default_session()

    # Copy the mapping network's (ND's) weights into NE so NE acts as its
    # invertible counterpart.
    ND_vars_pairs = {
        name: tflib.run(val)
        for name, val in Gs.components.mapping.vars.items()
    }
    for ne_name, ne_val in NE.vars.items():
        tflib.set_vars({ne_val: ND_vars_pairs[ne_name]})

    # Preparing data
    input_images, images_name = preparing_data(im_path=args.data_dir_test,
                                               img_type=args.img_type)

    if args.mode == 0:
        save_dir = args.output_dir or './outputs/interpolation_on_z'
        os.makedirs(save_dir, exist_ok=True)

        print('Interpolation on z space...')
        for i in tqdm(range(input_images.shape[0])):
            source_image = input_images[i:i + 1]
            source_name = images_name[i]
            for j in range(input_images.shape[0]):
                target_image = input_images[j:j + 1]
                target_name = images_name[j]
                # Encode both endpoints to z codes.
                source_code = sess.run(latent_z,
                                       feed_dict={real: source_image})
                target_code = sess.run(latent_z,
                                       feed_dict={real: target_image})

                codes = linear_interpolate(src_code=source_code,
                                           dst_code=target_code,
                                           step=args.step)

                # Decode the interpolation path in fixed-size batches; the
                # input buffer is zero-padded, so only the first len(batch)
                # outputs of each run are kept.
                inputs = np.zeros((args.batch_size, latent_dim), np.float32)
                output_images = []
                for idx in range(0, args.step, args.batch_size):
                    batch = codes[idx:idx + args.batch_size]
                    inputs[0:len(batch)] = batch
                    images = sess.run(reconstruction_from_z,
                                      feed_dict={Z: inputs})
                    output_images.append(images[0:len(batch)])
                output_images = np.concatenate(output_images, axis=0)
                # Strip layout: source | interpolations | target, NCHW->NHWC.
                final_results = np.concatenate(
                    [source_image, output_images, target_image], axis=0)
                final_results = final_results.transpose(0, 2, 3, 1)
                imwrite(
                    immerge(final_results, 1, args.step + 2),
                    '%s/%s_to_%s.png' % (save_dir, source_name, target_name))

    elif args.mode == 1:
        save_dir = args.output_dir or './outputs/interpolation_on_w'
        os.makedirs(save_dir, exist_ok=True)

        print('Interpolation on w space...')
        for i in tqdm(range(input_images.shape[0])):
            source_image = input_images[i:i + 1]
            source_name = images_name[i]
            for j in range(input_images.shape[0]):
                target_image = input_images[j:j + 1]
                target_name = images_name[j]
                # Here the endpoints are the raw encoder (w) codes; they are
                # fed through the Z placeholder and tiled per layer in-graph.
                source_code = sess.run(latent_w,
                                       feed_dict={real: source_image})
                target_code = sess.run(latent_w,
                                       feed_dict={real: target_image})

                codes = linear_interpolate(src_code=source_code,
                                           dst_code=target_code,
                                           step=args.step)

                # Same padded batching scheme as the z-space branch above.
                inputs = np.zeros((args.batch_size, latent_dim), np.float32)
                output_images = []
                for idx in range(0, args.step, args.batch_size):
                    batch = codes[idx:idx + args.batch_size]
                    inputs[0:len(batch)] = batch
                    images = sess.run(reconstruction_from_w,
                                      feed_dict={Z: inputs})
                    output_images.append(images[0:len(batch)])
                output_images = np.concatenate(output_images, axis=0)
                final_results = np.concatenate(
                    [source_image, output_images, target_image], axis=0)
                final_results = final_results.transpose(0, 2, 3, 1)
                imwrite(
                    immerge(final_results, 1, args.step + 2),
                    '%s/%s_to_%s.png' % (save_dir, source_name, target_name))
    else:
        raise ValueError('Invalid mode!')
Пример #23
0
        g_summary_opt, _ = sess.run([g_summary, g_step], feed_dict={z: z_ipt})
        summary_writer.add_summary(g_summary_opt, it)

        # display
        if it % 1 == 0:
            print("Epoch: (%3d) (%5d/%5d)" % (epoch, it_epoch, batch_epoch))

        # save
        if (it + 1) % 1000 == 0:
            save_path = saver.save(
                sess, '%s/Epoch_(%d)_(%dof%d).ckpt' %
                (ckpt_dir, epoch, it_epoch, batch_epoch))
            print('Model saved in file: % s' % save_path)

        # sample
        if (it + 1) % 100 == 0:
            f_sample_opt = sess.run(f_sample, feed_dict={z: z_ipt_sample})

            save_dir = './sample_images_while_training/' + dir_name
            utils.mkdir(save_dir + '/')
            utils.imwrite(
                utils.immerge(f_sample_opt, 5,
                              5), '%s/Epoch_(%d)_(%dof%d).png' %
                (save_dir, epoch, it_epoch, batch_epoch))

except Exception:
    traceback.print_exc()
finally:
    print(" [*] Close main session!")
    sess.close()
Пример #24
0
def train():
    """Build a DCGAN graph and train it on the cartoon TFRecord dataset.

    Alternates one discriminator update and one generator update per
    iteration, writes a 10x10 sample grid to ``out_cartoon/`` every 400
    iterations, and saves the final model to ``./checkpoints/DCGAN``.

    Relies on module-level globals: ``batch_size``, ``run_nums`` and the
    project modules ``models``, ``read_records``, ``utils``.
    """
    graph = tf.Graph()

    with graph.as_default():
        # Latent input. First dim is None (was hard-coded to 64) so the
        # feeds below, which use the module-level `batch_size`, always fit.
        z = tf.placeholder(tf.float32, shape=[None, 100], name='z')

        img_batch = read_records.read_and_decode(
            'tf_records/cartoon.tfrecords', batch_size=batch_size)

        # generator
        fake = models.generator(z, reuse=False)

        # discriminator: same weights applied to real and generated batches
        dis_real = models.discriminator(img_batch, reuse=False)
        dis_fake = models.discriminator(fake, reuse=True)

        # Standard GAN sigmoid cross-entropy losses with one-sided label
        # smoothing (real label 0.9 instead of 1.0).
        gene_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(dis_fake) * 0.9, logits=dis_fake))
        d_f_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.zeros_like(dis_fake), logits=dis_fake))
        d_r_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.ones_like(dis_real) * 0.9, logits=dis_real))
        dis_loss = d_f_loss + d_r_loss

        gen_loss_sum = tf.summary.scalar("gen_loss", gene_loss)
        dis_loss_sum = tf.summary.scalar("dis_loss", dis_loss)
        merge_sum_gen = tf.summary.merge([gen_loss_sum])
        merge_sum_dis = tf.summary.merge([dis_loss_sum])

        # Split trainable variables by scope so each optimizer updates
        # only its own network.
        gene_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                     scope='generator')
        dis_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                    scope='discriminator')

        gene_opt = tf.train.AdamOptimizer(
            learning_rate=0.0002, beta1=0.3).minimize(gene_loss,
                                                      var_list=gene_var)
        dis_opt = tf.train.AdamOptimizer(learning_rate=0.0002,
                                         beta1=0.3).minimize(dis_loss,
                                                             var_list=dis_var)

        # Named pass-through op so the inference script can fetch the
        # sampler via graph.get_tensor_by_name('test_out:0') after restore.
        test_sample = models.generator(z, reuse=True)
        test_out = tf.add(test_sample, 0, 'test_out')

        init = tf.global_variables_initializer()

    with tf.Session(graph=graph) as sess:
        sess.run(init)  # initialize all global variables

        # Fixed latent batch reused for every sample grid so successive
        # grids show training progress on the same codes.
        z_ipt_sample = np.random.normal(size=[batch_size, 100])
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        writer = tf.summary.FileWriter('./tensorboard', sess.graph)
        saver = tf.train.Saver()
        try:
            for i in range(run_nums):
                z_ipt = np.random.normal(size=[batch_size, 100])
                # train D
                sum_dis, _, dis_loss1 = sess.run(
                    [merge_sum_dis, dis_opt, dis_loss], feed_dict={z: z_ipt})
                # train G
                sum_gen, _, gen_loss1 = sess.run(
                    [merge_sum_gen, gene_opt, gene_loss], feed_dict={z: z_ipt})

                if i % 400 == 0:
                    print(i)
                    test_sample_opt = sess.run(test_sample,
                                               feed_dict={z: z_ipt_sample})
                    utils.mkdir('out_cartoon')
                    # NOTE(review): immerge builds a 10x10 grid (100 tiles)
                    # from a batch of `batch_size` images — confirm
                    # batch_size provides enough samples.
                    utils.imwrite(utils.immerge(test_sample_opt, 10, 10),
                                  'out_cartoon/' + str(i) + '.jpg')
                # NOTE(review): summaries are fetched but never written;
                # uncomment to actually log to ./tensorboard.
                # writer.add_summary(sum_dis, i)
                # writer.add_summary(sum_gen, i)
            print("train end!!!")

        except tf.errors.OutOfRangeError:
            print('out of range')
        finally:
            # Signal reader threads to stop regardless of how the loop
            # exited (request_stop is idempotent, so once is enough).
            coord.request_stop()

        coord.join(threads)
        writer.close()
        saver.save(sess, "./checkpoints/DCGAN")
Пример #25
0
import tensorflow as tf
import utils
import numpy as np

# Restore the trained DCGAN graph from its latest checkpoint and write a
# single 10x10 grid of generated samples to infer_cartoon/test.jpg.
with tf.Session() as sess:
    restorer = tf.train.import_meta_graph('checkpoints/DCGAN.meta')
    restorer.restore(sess, tf.train.latest_checkpoint('checkpoints'))

    # Look up the latent placeholder and the named sampler op by tensor name.
    latent_in = sess.graph.get_tensor_by_name('z:0')
    sampler = sess.graph.get_tensor_by_name('test_out:0')

    latent_batch = np.random.normal(size=[64, 100])
    generated = sess.run(sampler, feed_dict={latent_in: latent_batch})
    utils.mkdir('infer_cartoon')
    utils.imwrite(utils.immerge(generated, 10, 10), 'infer_cartoon/' + 'test.jpg')

def saveSampleImgs(imgs, full_path, row, column):
    """Merge `imgs` into a `row` x `column` grid and write it to `full_path`."""
    grid = utils.immerge(imgs, row, column)
    utils.imwrite(grid, full_path)
Пример #27
0
        
        # The last two elements in the dataset are used for visualization, so I crop the dataset just in case
        # TODO: fix this
        for batch in range(ds_length - 10):
            it += 1
            it_in_epoch += 1

            # train
            summary_opt, _ = sess.run([summary, step])
            for summary_opt_i in summary_opt: summary_writer.add_summary(summary_opt_i, it)

            # display
            if (it + 1) % 100 == 0:
                print("Epoch: (%3d) (%5d/%5d)" % (ep, it_in_epoch, it_per_epoch))

            # sample
            if (it + 1) % 1000 == 0:
                save_dir = './output/%s/sample_training' % experiment_name
                os.makedirs(save_dir, exist_ok=True)

                img_rec_opt_sample, img_data = sess.run([img_rec_sample, img])
                ipt_rec = np.concatenate((img_data, img_rec_opt_sample), axis=2).squeeze()
                img_opt_sample = sess.run(img_sample, feed_dict={z_sample: z_ipt_sample}).squeeze()

                # TODO remove immerge
                imageio.imwrite('%s/Epoch_(%d)_img_rec.jpg' % (save_dir, ep), utils.immerge(ipt_rec) / 2 + 0.5)

                imageio.imwrite('%s/Epoch_(%d)_img_sample.jpg' % (save_dir, ep), utils.immerge(img_opt_sample) / 2 + 0.5)
finally:
    sess.close()
Пример #28
0
        g_summary_opt, _ = sess.run([g_summary, g_step], feed_dict={z: z_ipt})
        summary_writer.add_summary(g_summary_opt, it)

        # display
        if it % 1 == 0:
            print("Epoch: (%3d) (%5d/%5d)" % (epoch, it_epoch, batch_epoch))

        # save
        if (it + 1) % 1000 == 0:
            save_path = saver.save(
                sess, '%s/Epoch_(%d)_(%dof%d).ckpt' %
                (ckpt_dir, epoch, it_epoch, batch_epoch))
            print('Model saved in file: % s' % save_path)

        # sample
        if (it + 1) % 100 == 0:
            f_sample_opt = sess.run(f_sample, feed_dict={z: z_ipt_sample})

            save_dir = './sample_images_while_training/celeba_wgan'
            utils.mkdir(save_dir + '/')
            utils.imwrite(
                utils.immerge(f_sample_opt, 10,
                              10), '%s/Epoch_(%d)_(%dof%d).jpg' %
                (save_dir, epoch, it_epoch, batch_epoch))

except Exception as e:
    traceback.print_exc()
finally:
    print(" [*] Close main session!")
    sess.close()
Пример #29
0
def main():
    """Train an image-to-image translation GAN (CycleGAN-style).

    Builds the graph via ``build_graph``, restores the latest checkpoint for
    ``args.dataset`` when one exists, then loops: translate a batch, refresh
    the image-history pools, and run one G update, one D_b update and one
    D_a update per iteration. Checkpoints and test-set sample grids are
    written every ``args.save_interval`` / ``args.sample_interval`` epochs.
    """
    args = get_args()
    # Placeholders (NHWC float32): real images of domain A / B, plus
    # history-pool samples of translated images fed to the discriminators.
    a_r = tf.placeholder(shape=[None, args.crop_size, args.crop_size, 3],
                         dtype=tf.float32)
    b_r = tf.placeholder(shape=[None, args.crop_size, args.crop_size, 3],
                         dtype=tf.float32)
    a2b_s = tf.placeholder(shape=[None, args.crop_size, args.crop_size, 3],
                           dtype=tf.float32)
    b2a_s = tf.placeholder(shape=[None, args.crop_size, args.crop_size, 3],
                           dtype=tf.float32)
    # cvt: translated and cycle-reconstructed image tensors; sum_: merged
    # summaries; tr_op: the three train ops (G, D_a, D_b).
    cvt, sum_, tr_op = build_graph(args, a_r, b_r, a2b_s, b2a_s)
    a2b, b2a, a2b2a, b2a2b = cvt
    g_sum, d_sum_a, d_sum_b = sum_
    g_tr_op, d_tr_op_a, d_tr_op_b = tr_op
    # create a session
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    # persistent iteration counter so training can resume where it stopped
    it_cnt, update_cnt = ops.counter()
    # get data readers (train and test, for both domains)
    a_dr, b_dr, a_te_dr, b_te_dr = get_data_reader(args)
    # Image-history pools: discriminators see a mix of current and past
    # generator outputs — presumably the CycleGAN history-buffer trick;
    # verify against utils.item_pool.
    a2b_pool = utils.item_pool()
    b2a_pool = utils.item_pool()
    # create summary writer
    summary_writer = tf.summary.FileWriter('summaries/' + args.dataset,
                                           sess.graph)
    # create saver
    ckpt_dir = 'checkpoints/' + args.dataset
    utils.mkdir(ckpt_dir + '/')
    saver = tf.train.Saver(max_to_keep=5)
    ckpt_path = ops.load_checkpoint(ckpt_dir, sess, saver)
    if ckpt_path is None:
        # fresh run: no checkpoint found, initialize from scratch
        sess.run(tf.global_variables_initializer())
    else:
        print('Copy variables from {}'.format(ckpt_path))

    # NOTE(review): if Coordinator creation itself raised, `coord` and
    # `threads` would be undefined in the finally block below (NameError).
    try:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # One "epoch" is bounded by the smaller of the two datasets.
        it_every_epoch = min(len(a_dr), len(b_dr)) // args.batch_size
        max_it = args.epoch * it_every_epoch
        start_it = sess.run(it_cnt)
        for it in range(start_it, max_it):
            sess.run(update_cnt)

            a_real = a_dr.read_batch(sess)
            b_real = b_dr.read_batch(sess)
            # Translate the current batch, then push the results through
            # the pools to get the (possibly historical) D inputs.
            a2b_opt, b2a_opt = sess.run([a2b, b2a],
                                        feed_dict={
                                            a_r: a_real,
                                            b_r: b_real
                                        })
            a2b_sample_ipt = np.array(a2b_pool(list(a2b_opt)))
            b2a_sample_ipt = np.array(b2a_pool(list(b2a_opt)))

            # train G
            g_summary, _ = sess.run([g_sum, g_tr_op],
                                    feed_dict={
                                        a_r: a_real,
                                        b_r: b_real
                                    })
            summary_writer.add_summary(g_summary, it)
            # train D_b
            d_summary_b, _ = sess.run([d_sum_b, d_tr_op_b],
                                      feed_dict={
                                          b_r: b_real,
                                          a2b_s: a2b_sample_ipt
                                      })
            summary_writer.add_summary(d_summary_b, it)
            # train D_a
            d_summary_a, _ = sess.run([d_sum_a, d_tr_op_a],
                                      feed_dict={
                                          a_r: a_real,
                                          b2a_s: b2a_sample_ipt
                                      })
            summary_writer.add_summary(d_summary_a, it)

            # check epoch
            epoch = it // it_every_epoch
            it_epoch = it % it_every_epoch + 1
            print("Epoch[{}]: [{}/{}]".format(epoch, it_epoch, it_every_epoch))

            # Save / sample only on the first iteration of qualifying epochs.
            if (epoch + 1) % args.save_interval == 0 and it_epoch == 1:
                save_path = saver.save(
                    sess, '%s/Epoch_(%d)_(%dof%d).ckpt' %
                    (ckpt_dir, epoch, it_epoch, it_every_epoch))
                print('[*] Model saved in file: %s' % save_path)

            if (epoch + 1) % args.sample_interval == 0 and it_epoch == 1:
                a_te_r = a_te_dr.read_batch(sess)
                b_te_r = b_te_dr.read_batch(sess)
                a2b_opt, a2b2a_opt, b2a_opt, b2a2b_opt = sess.run(
                    [a2b, a2b2a, b2a, b2a2b],
                    feed_dict={
                        a_r: a_te_r,
                        b_r: b_te_r
                    })
                # 2x3 grid, in concatenation order:
                # A, A->B, A->B->A, B, B->A, B->A->B.
                sample_opt = np.concatenate(
                    (a_te_r, a2b_opt, a2b2a_opt, b_te_r, b2a_opt, b2a2b_opt),
                    axis=0)
                save_dir = 'samples/' + args.dataset
                utils.mkdir(save_dir + '/')
                utils.imwrite(utils.immerge(sample_opt, 2, 3),
                              '{}/Epoch{}.jpg'.format(save_dir, epoch))
                print('[*] Saved sample img to ' +
                      '{}/Epoch{}.jpg'.format(save_dir, epoch))

    except Exception as e:
        # Propagate the error to the coordinator so reader threads stop too.
        coord.request_stop(e)
    finally:
        print("Stop threads and close session!")
        coord.request_stop()
        coord.join(threads)
        sess.close()