Code example #1
0
def train_noestimator(features,
                      labels,
                      noise_dims=64,
                      batch_size=32,
                      num_steps=1200,
                      num_eval=20,
                      seed=0):
    """Train a TF-GAN model directly, without the Estimator API.

    Args:
        features: training images.
        labels: labels for the training images.
        noise_dims: dimensionality of the generator's noise vector.
        batch_size: number of examples per training batch.
        num_steps: number of sequential GAN train steps to run.
        num_eval: number of images to generate for periodic evaluation.
        seed: RNG seed for reproducibility.
    """
    # Build the input pipeline: a (noise, image) iterator plus its session hook.
    input_fn, input_hook = _get_train_input_fn(
        features, labels, batch_size, noise_dims, seed)
    noise_batch, image_batch = input_fn()

    # Assemble the GAN graph: model, WGAN loss with gradient penalty,
    # and one Adam optimizer per sub-network.
    gan_model = tfgan.gan_model(
        generator_fn, discriminator_fn, image_batch, noise_batch)
    gan_loss = tfgan.gan_loss(
        gan_model,
        generator_loss_fn=tfgan.losses.wasserstein_generator_loss,
        discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
        gradient_penalty_weight=1.0)
    train_ops = tfgan.gan_train_ops(
        gan_model,
        gan_loss,
        tf.train.AdamOptimizer(0.001, beta1=0.5),
        tf.train.AdamOptimizer(0.0001, beta1=0.5))

    # Reuse the generator's variables to render evaluation images during training.
    with tf.variable_scope('Generator', reuse=True):
        eval_noise_fn = _get_predict_input_fn(num_eval, noise_dims)
        eval_images = gan_model.generator_fn(eval_noise_fn(), is_training=False)

    step_fn = tfgan.get_sequential_train_steps()
    global_step = tf.train.get_or_create_global_step()

    # Alternating train loop; every 400 steps show freshly generated samples.
    with tf.train.SingularMonitoredSession(hooks=[input_hook]) as sess:
        for i in range(num_steps + 1):
            cur_loss, _ = step_fn(sess, train_ops, global_step,
                                  train_step_kwargs={})
            if i % 400 == 0:
                generated_images = sess.run(eval_images)
                print("Iteration", i, "- Loss:", cur_loss)
                show(generated_images)
Code example #2
0
def start_train():
    """Train the Stage-I GAN, checkpointing and logging summaries periodically.

    Restores from the latest checkpoint under ``conf.stageI_model_path`` when
    one exists; otherwise initializes all variables from scratch.
    """
    conf.is_training = True
    train_input = data_provider.get_stage_I_train_input_fn()
    condition, real_image = train_input()

    gan_model, gan_loss = get_model_and_loss(condition, real_image)

    gan_train_ops = tfgan.gan_train_ops(
        model=gan_model,
        loss=gan_loss,
        generator_optimizer=generator_optimizer,
        discriminator_optimizer=discriminator_optimizer)

    # Run 1 generator step for every 10 discriminator steps.
    train_step_fn = tfgan.get_sequential_train_steps(
        namedtuples.GANTrainSteps(1, 10))

    with tf.Session() as sess:
        saver = tf.train.Saver()

        # Resume from a checkpoint when available; otherwise start fresh.
        if not tf.train.get_checkpoint_state(conf.stageI_model_path):
            sess.run(tf.global_variables_initializer())
        else:
            saver.restore(sess,
                          tf.train.latest_checkpoint(conf.stageI_model_path))

        train_writer = tf.summary.FileWriter(conf.stageI_model_path,
                                             sess.graph)
        # merge_all() must be called before the loop: summary ops created
        # afterwards would never be included in `merged`.
        merged = tf.summary.merge_all()
        step = sess.run(global_step)

        with slim.queues.QueueRunners(sess):
            for _ in range(conf.training_steps):
                step = step + 1

                # NOTE: cur_loss is a Python float returned by the step fn;
                # it cannot feed a summary op, so summaries come from `merged`.
                # (The old per-iteration tf.summary.scalar call only bloated
                # the graph and was never executed -- removed.)
                cur_loss, _ = train_step_fn(sess, gan_train_ops, global_step,
                                            {})
                if step % 50 == 0:
                    summary = sess.run(merged)
                    train_writer.add_summary(summary, step)

                # Checkpoint every 200 steps.
                if step % 200 == 0:
                    saver.save(sess, conf.stageI_model_path, global_step)

                # Dump a visualization of generated data every 1000 steps.
                if step % 1000 == 0:
                    gen_data = sess.run(gan_model.generated_data)
                    datas = visualize_data(gen_data)
                    scipy.misc.toimage(datas).save('image/{}.jpg'.format(step))
Code example #3
0
def run_discgan():
    """ Constructs and trains the discriminative GAN consisting of
        Jerry and Diego.
    """
    # code follows the examples from
    # https://github.com/tensorflow/models/blob/master/research/gan/tutorial.ipynb

    # build the GAN model
    discgan = tfgan.gan_model(
        generator_fn=generator,
        discriminator_fn=adversary_conv(OUTPUT_SIZE),
        real_data=tf.random_uniform(shape=[BATCH_SIZE, OUTPUT_SIZE]),
        generator_inputs=get_input_tensor(BATCH_SIZE, MAX_VAL))
    # Build the GAN loss
    discgan_loss = tfgan.gan_loss(
        discgan,
        generator_loss_fn=tfgan.losses.least_squares_generator_loss,
        discriminator_loss_fn=tfgan.losses.least_squares_discriminator_loss)
    # Create the train ops, which calculate gradients and apply updates to weights.
    train_ops = tfgan.gan_train_ops(discgan,
                                    discgan_loss,
                                    generator_optimizer=GEN_OPT,
                                    discriminator_optimizer=OPP_OPT)
    # start TensorFlow session
    with tf.train.SingularMonitoredSession() as sess:
        # Pretraining runs 0 generator / PRE_STEPS discriminator steps;
        # regular training runs 1 generator / ADV_MULT discriminator steps.
        pretrain_steps_fn = tfgan.get_sequential_train_steps(
            tfgan.GANTrainSteps(0, PRE_STEPS))
        train_steps_fn = tfgan.get_sequential_train_steps(
            tfgan.GANTrainSteps(1, ADV_MULT))
        global_step = tf.train.get_or_create_global_step()

        # pretrain discriminator; Ctrl-C skips ahead to adversarial training
        print('\n\nPretraining ... ', end="", flush=True)
        try:
            pretrain_steps_fn(sess,
                              train_ops,
                              global_step,
                              train_step_kwargs={})
        except KeyboardInterrupt:
            pass
        print('[DONE]\n\n')

        # train both models
        losses_jerry = []
        losses_diego = []
        try:
            evaluate(sess, discgan.generated_data, discgan.generator_inputs, 0,
                     'jerry')

            for step in range(STEPS):
                train_steps_fn(sess,
                               train_ops,
                               global_step,
                               train_step_kwargs={})

                # if performed right number of steps, log
                if step % LOG_EVERY_N == 0:
                    # Fetch both losses in a single run so they come from the
                    # same graph evaluation (the old no-op sess.run([]) and
                    # the two separate .eval() calls are gone).
                    gen_l, disc_l = sess.run([discgan_loss.generator_loss,
                                              discgan_loss.discriminator_loss])

                    debug.print_step(step, gen_l, disc_l)
                    losses_jerry.append(gen_l)
                    losses_diego.append(disc_l)

        except KeyboardInterrupt:
            print('[INTERRUPTED BY USER] -- evaluating')

        # produce output
        files.write_to_file(losses_jerry, PLOT_DIR + '/jerry_loss.txt')
        files.write_to_file(losses_diego, PLOT_DIR + '/diego_loss.txt')
        evaluate(sess, discgan.generated_data, discgan.generator_inputs, 1,
                 'jerry')
Code example #4
0
    discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,
    gradient_penalty_weight=1.0)

# L1 reconstruction loss between real and generated samples, folded into the
# adversarial loss below.  NOTE(review): assumes real_data and generated_data
# have matching shapes -- confirm upstream.
l1_loss = tf.norm(gan_model.real_data - gan_model.generated_data, ord=1)

gan_loss = tfgan.losses.combine_adversarial_loss(gan_loss, gan_model, l1_loss, weight_factor=FLAGS.weight_factor)

# Asymmetric learning rates: generator 1e-3, discriminator 1e-6.
train_ops = tfgan.gan_train_ops(gan_model,gan_loss,generator_optimizer=tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.85, beta2=0.999, epsilon=1e-5),discriminator_optimizer=tf.train.AdamOptimizer(learning_rate=0.000001, beta1=0.85, beta2=0.999, epsilon=1e-5))
#train_ops.global_step_inc_op = tf.train.get_global_step().assign_add(1)


#store_output_and_check_loss(gan_loss, gan_model.generated_data, gan_model.real_data, num_of_samples=3, prefix='gen',logdir=log_folder)

# NOTE(review): global_step_tensor is created but never used below;
# get_or_create_global_step() supplies the actual step counter -- confirm
# whether the extra Variable is intentional.
global_step_tensor = tf.Variable(1, trainable=False, name='global_step')
global_step = tf.train.get_or_create_global_step()
# Each call to train_step_fn runs 10 generator and 10 discriminator steps.
train_step_fn = tfgan.get_sequential_train_steps( train_steps=tf.contrib.gan.GANTrainSteps(10, 10))
with monitored_session.MonitoredTrainingSession(checkpoint_dir=log_folder) as session:
    loss = None
    for y in xrange(1,20):  # xrange: this is Python 2 code
        for x in xrange(0,500):
            cur_loss, _ = train_step_fn(session, train_ops, global_step, train_step_kwargs={})

            gen_loss_np = session.run(gan_loss.generator_loss)
            dis_loss_np = session.run(gan_loss.discriminator_loss)

            # Dump generated samples whenever generator loss dips below 170.
            if gen_loss_np < 170:
                store_output_and_check_loss(session, gan_loss, gan_model.generated_data,prefix='final_l_'+str(round(gen_loss_np))+ '_' + str(NUMBER_OF_NOTES) + '_gen_', play=False,num_of_samples=30)
            # NOTE(review): y*x is not a running iteration count
            # ((y-1)*500 + x would be) -- confirm intent.
            print('iteration:'+ str(y*x))
            print('Generator loss: %f' % gen_loss_np)
            print('Discriminator loss: %f' % dis_loss_np)
Code example #5
0
File: test3.py  Project: huyoboy/dl
              noise_dims-cont_dim, cont_dim)

# Build three evaluation noise sets: one varying the categorical code and two
# varying each continuous code dimension of the InfoGAN.
display_noises = []
display_noises.append(util.get_eval_noise_categorical(*noise_args))
display_noises.append(util.get_eval_noise_continuous_dim1(*noise_args))
display_noises.append(util.get_eval_noise_continuous_dim2(*noise_args))

# Render one image grid per noise set, reusing the trained generator weights.
display_images = []
for noise in display_noises:
    with tf.variable_scope('Generator', reuse=True):
        display_images.append(infogan_model.generator_fn(noise, is_training=False))

# Stack all grids into a single 10-column image for visualization.
display_img = tfgan.eval.image_reshaper(
    tf.concat(display_images, 0), num_cols=10)

global_step = tf.train.get_or_create_global_step()
train_step_fn = tfgan.get_sequential_train_steps()
loss_values, mnist_score_values  = [], []

# Train for 6001 steps; every 1000 steps compute the MNIST score and show
# the current generator output.
with tf.train.SingularMonitoredSession() as sess:
    start_time = time.time()
    for i in xrange(6001):  # xrange: this is Python 2 code
        cur_loss, _ = train_step_fn(
            sess, gan_train_ops, global_step, train_step_kwargs={})
        loss_values.append((i, cur_loss))
        if i % 1000 == 0:
            mnist_score_np, display_img_np = sess.run([eval_score, display_img])
            mnist_score_values.append((i, mnist_score_np))
            visualize_training_generator(i, start_time, display_img_np)
            print('Current loss: %f' % cur_loss)
            print('Current MNIST score: %f' % mnist_score_values[-1][1])
Code example #6
0
File: StageII.py  Project: sssste/StackGAN
def start_train():
    """Train the Stage-II GAN on top of a frozen Stage-I generator.

    Restores Stage-II weights when a Stage-II checkpoint exists; otherwise
    bootstraps from the Stage-I checkpoint (which must exist) and initializes
    only the newly created Stage-II variables.
    """
    stageI_train_input = data_provider.get_stage_I_train_input_fn()
    condition, real_image = stageI_train_input()
    stageI_gan_model, _ = StageI.get_model_and_loss(condition, real_image)
    conf.is_training = True
    need_to_init = False

    condition, real_image = data_provider.get_stage_II_train_input_fn()()

    with tf.Session() as sess:
        saver = tf.train.Saver()

        if tf.train.get_checkpoint_state(conf.stageII_model_path):
            saver.restore(sess,
                          tf.train.latest_checkpoint(conf.stageII_model_path))
        else:
            if not tf.train.get_checkpoint_state(conf.stageI_model_path):
                raise FileNotFoundError("StageI model not found!")
            # Bootstrap from StageI: remember its variables so that only the
            # new StageII variables are initialized below.
            saver.restore(
                sess, tf.train.latest_checkpoint(conf.stageI_model_path))
            sI_var = tf.global_variables()
            need_to_init = True
            # BUG FIX: the assign op must actually be executed; previously
            # the op was built but never run, so the step was never reset.
            sess.run(tf.assign(global_step, 0))

        with tf.variable_scope('Generator', reuse=True):
            gen_img = stageI_gan_model.generator_fn(condition)

        # StageI does not participate in training: drop its update ops so the
        # train ops below only touch StageII.
        param = tf.get_collection_ref(tf.GraphKeys.UPDATE_OPS)
        del param[:]

        gen_input = {"gen_img": gen_img, "caption": condition["caption"]}

        stageII_gan_model, gan_loss = get_model_and_loss(gen_input, real_image)

        gan_train_ops = tfgan.gan_train_ops(
            model=stageII_gan_model,
            loss=gan_loss,
            generator_optimizer=generator_optimizer,
            discriminator_optimizer=discriminator_optimizer)

        if need_to_init:
            var_to_init = [x for x in tf.global_variables() if x not in sI_var]
            sess.run(tf.variables_initializer(var_to_init))

        # Run 1 generator step for every 10 discriminator steps.
        train_step_fn = tfgan.get_sequential_train_steps(
            namedtuples.GANTrainSteps(1, 10))

        train_writer = tf.summary.FileWriter(conf.stageII_model_path,
                                             sess.graph)
        # merge_all() must run before the loop; ops created later would never
        # be part of `merged`.
        merged = tf.summary.merge_all()
        step = sess.run(global_step)

        with slim.queues.QueueRunners(sess):
            for _ in range(conf.training_steps):
                # (Removed leftover debug code that popped up an image window
                # and printed the generator inputs on every iteration.)
                step = step + 1

                cur_loss, _ = train_step_fn(sess, gan_train_ops, global_step,
                                            {})
                if step % 50 == 0:
                    summary = sess.run(merged)
                    train_writer.add_summary(summary, step)

                # BUG FIX: save the StageII model under the StageII path;
                # it previously overwrote the StageI checkpoint.
                if step % 200 == 0:
                    saver.save(sess, conf.stageII_model_path, global_step)

                # visualize data every 1000 steps
                if step % 1000 == 0:
                    # BUG FIX: was `gan_model`, which is undefined here.
                    gen_data = sess.run(stageII_gan_model.generated_data)
                    datas = visualize_data(gen_data)
                    scipy.misc.toimage(datas).save('image/{}.jpg'.format(step))