def dcgan_discrim(x_hat, sess):
    dcgan = dcgan_model.DCGAN(sess,
                              image_size=FLAGS.image_size,
                              batch_size=FLAGS.batch_size,
                              output_size=FLAGS.output_size,
                              c_dim=FLAGS.c_dim,
                              dataset_name=FLAGS.dataset,
                              is_crop=FLAGS.is_crop,
                              checkpoint_dir=FLAGS.checkpoint_dir,
                              sample_dir=FLAGS.sample_dir)

    # The discriminator graph is built for a batch of 64, so broadcast the
    # single reconstructed image across a zero batch to satisfy the shapes.
    x_hat_image = tf.reshape(x_hat, [1, 64, 64, 3])
    all_zeros = tf.zeros([64, 64, 64, 3])
    discrim_input = all_zeros + x_hat_image
    prob, _ = dcgan.discriminator(discrim_input, is_train=False)
    d_loss = -tf.log(prob[0])  # negative log-probability that x_hat is judged real

    restore_vars = [
        'd_bn1/beta', 'd_bn1/gamma', 'd_bn1/moving_mean',
        'd_bn1/moving_variance', 'd_bn2/beta', 'd_bn2/gamma',
        'd_bn2/moving_mean', 'd_bn2/moving_variance', 'd_bn3/beta',
        'd_bn3/gamma', 'd_bn3/moving_mean', 'd_bn3/moving_variance',
        'd_h0_conv/biases', 'd_h0_conv/w', 'd_h1_conv/biases', 'd_h1_conv/w',
        'd_h2_conv/biases', 'd_h2_conv/w', 'd_h3_conv/biases', 'd_h3_conv/w',
        'd_h3_lin/Matrix', 'd_h3_lin/bias'
    ]

    restore_dict = {
        var.op.name: var
        for var in tf.global_variables() if var.op.name in restore_vars
    }
    restore_path = tf.train.latest_checkpoint('../models/celebA_64_64/')

    return d_loss, restore_dict, restore_path


def dcgan_gen(z, sess):
    dcgan = dcgan_model.DCGAN(sess,
                              image_size=FLAGS.image_size,
                              batch_size=FLAGS.batch_size,
                              output_size=FLAGS.output_size,
                              c_dim=FLAGS.c_dim,
                              dataset_name=FLAGS.dataset,
                              is_crop=FLAGS.is_crop,
                              checkpoint_dir=FLAGS.checkpoint_dir,
                              sample_dir=FLAGS.sample_dir)

    # Reuse the generator variables that the DCGAN constructor already created.
    tf.get_variable_scope().reuse_variables()

    s = dcgan.output_size
    s2, s4, s8, s16 = int(s / 2), int(s / 4), int(s / 8), int(s / 16)

    # project `z` and reshape
    h0 = tf.reshape(
        dcgan_ops.linear(z, dcgan.gf_dim * 8 * s16 * s16, 'g_h0_lin'),
        [-1, s16, s16, dcgan.gf_dim * 8])
    h0 = tf.nn.relu(dcgan.g_bn0(h0, train=False))

    h1 = dcgan_ops.deconv2d(h0, [dcgan.batch_size, s8, s8, dcgan.gf_dim * 4],
                            name='g_h1')
    h1 = tf.nn.relu(dcgan.g_bn1(h1, train=False))

    h2 = dcgan_ops.deconv2d(h1, [dcgan.batch_size, s4, s4, dcgan.gf_dim * 2],
                            name='g_h2')
    h2 = tf.nn.relu(dcgan.g_bn2(h2, train=False))

    h3 = dcgan_ops.deconv2d(h2, [dcgan.batch_size, s2, s2, dcgan.gf_dim * 1],
                            name='g_h3')
    h3 = tf.nn.relu(dcgan.g_bn3(h3, train=False))

    h4 = dcgan_ops.deconv2d(h3, [dcgan.batch_size, s, s, dcgan.c_dim],
                            name='g_h4')

    x_hat = tf.nn.tanh(h4)  # generator output in the pixel range [-1, 1]

    restore_vars = [
        'g_bn0/beta', 'g_bn0/gamma', 'g_bn0/moving_mean',
        'g_bn0/moving_variance', 'g_bn1/beta', 'g_bn1/gamma',
        'g_bn1/moving_mean', 'g_bn1/moving_variance', 'g_bn2/beta',
        'g_bn2/gamma', 'g_bn2/moving_mean', 'g_bn2/moving_variance',
        'g_bn3/beta', 'g_bn3/gamma', 'g_bn3/moving_mean',
        'g_bn3/moving_variance', 'g_h0_lin/Matrix', 'g_h0_lin/bias',
        'g_h1/biases', 'g_h1/w', 'g_h2/biases', 'g_h2/w', 'g_h3/biases',
        'g_h3/w', 'g_h4/biases', 'g_h4/w'
    ]

    restore_dict = {
        var.op.name: var
        for var in tf.global_variables() if var.op.name in restore_vars
    }
    restore_path = tf.train.latest_checkpoint('../models/celebA_64_64/')

    return x_hat, restore_dict, restore_path
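
Both helpers build the graph and return a (tensor, restore_dict, restore_path) triple instead of restoring weights themselves, so the caller decides when to load the pre-trained DCGAN. A minimal driver sketch, assuming FLAGS.batch_size == 1 and a 100-dimensional latent code (both are assumptions, not part of the code above):

def invert_image(sess):
    # Hypothetical inversion driver; z_dim = 100 and FLAGS.batch_size == 1 are
    # assumptions so the generator and discriminator shapes line up.
    z = tf.Variable(tf.random_normal([1, 100]), name='z')  # latent code to optimize
    x_hat, gen_dict, gen_path = dcgan_gen(z, sess)
    d_loss, disc_dict, disc_path = dcgan_discrim(x_hat, sess)

    # Only z is trainable; the DCGAN weights stay frozen.
    opt = tf.train.AdamOptimizer(0.01).minimize(d_loss, var_list=[z])

    # Initialize everything, then overwrite the DCGAN weights from the checkpoint.
    sess.run(tf.global_variables_initializer())
    tf.train.Saver(var_list=gen_dict).restore(sess, gen_path)
    tf.train.Saver(var_list=disc_dict).restore(sess, disc_path)

    for _ in range(1000):
        sess.run(opt)  # push x_hat toward images the discriminator rates as real
    return sess.run(x_hat)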
Example #3
def main():
    start_time = time.time()  # Clocking start

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # DCGAN model
        model = dcgan.DCGAN(s, batch_size=train_step['batch_size'])

        # Initialize variables before restoring, so the initializer does not
        # overwrite restored weights with fresh random values.
        s.run(tf.global_variables_initializer())

        # Load model & Graph & Weights
        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            # Restore from checkpoint
            model.saver.restore(s, ckpt.model_checkpoint_path)

            global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                '-')[-1]
            print("[+] global step : %s" % global_step, " successfully loaded")
        else:
            global_step = 0
            print('[-] No checkpoint file found')

        # Training, test data set
        dataset = DataSet(input_height=32,
                          input_width=32,
                          input_channel=3,
                          name='cifar-100')
        dataset_iter = DataIterator(dataset.train_images, dataset.train_labels,
                                    train_step['batch_size'])

        sample_x = dataset.valid_images[:model.sample_num].astype(
            np.float32) / 255.
        sample_z = np.random.uniform(-1., 1., [model.sample_num, model.z_dim])

        d_overpowered = False  # set when d_loss < g_loss / 2, i.e. G loss > 2 * D loss

        step = int(global_step)
        cont = step // 750  # recover the epoch from the global step (assumes ~750 updates per epoch)
        for epoch in range(cont, cont + train_step['epoch']):
            for batch_images, _ in dataset_iter.iterate():
                batch_x = batch_images.astype(np.float32) / 255.
                batch_z = np.random.uniform(
                    -1., 1.,
                    [train_step['batch_size'], model.z_dim]).astype(np.float32)

                # Update D network
                if not d_overpowered:
                    _, d_loss = s.run([model.d_op, model.d_loss],
                                      feed_dict={
                                          model.x: batch_x,
                                          model.z: batch_z
                                      })

                # Update G network
                _, g_loss = s.run([model.g_op, model.g_loss],
                                  feed_dict={model.z: batch_z})

                d_overpowered = d_loss < g_loss / 2.  # d_loss is stale if the D update was skipped

                if step % train_step['logging_interval'] == 0:
                    batch_z = np.random.uniform(
                        -1., 1.,
                        [train_step['batch_size'], model.z_dim]).astype(
                            np.float32)

                    d_loss, g_loss, summary = s.run(
                        [model.d_loss, model.g_loss, model.merged],
                        feed_dict={
                            model.x: batch_x,
                            model.z: batch_z,
                        })

                    d_overpowered = d_loss < g_loss / 2.

                    # Print loss
                    print("[+] Epoch %03d Step %05d => " % (epoch, step),
                          " D loss : {:.8f}".format(d_loss),
                          " G loss : {:.8f}".format(g_loss))

                    # Generate sample images from the fixed noise (evaluation only)
                    samples = s.run(model.g,
                                    feed_dict={
                                        model.x: sample_x,
                                        model.z: sample_z,
                                    })

                    # Summary saver
                    model.writer.add_summary(summary, step)

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{0}_{1}.png'.format(
                        epoch, step)

                    # Generated image save
                    iu.save_images(
                        samples,
                        size=[sample_image_height, sample_image_width],
                        image_path=sample_dir)

                    # Model save
                    model.saver.save(s, results['model'], global_step=step)

                step += 1

        end_time = time.time() - start_time  # Clocking end

        # Elapsed time
        print("[+] Elapsed time {:.8f}s".format(end_time))

        # The session is closed automatically when the with-block exits.
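
The d_overpowered flag above is a crude loss-balancing schedule: the discriminator update is skipped whenever its loss has fallen below half the generator loss, giving G a chance to catch up. Distilled into a sketch, with hypothetical run_d_update / run_g_update helpers standing in for the s.run calls:

def balanced_training(num_steps):
    # Sketch only: run_d_update / run_g_update are hypothetical helpers that
    # wrap s.run([model.d_op, model.d_loss], ...) and the G equivalent.
    d_overpowered = False
    d_loss = float('inf')
    for step in range(num_steps):
        if not d_overpowered:
            d_loss = run_d_update()  # train D only while it is not winning too easily
        g_loss = run_g_update()      # always train G
        d_overpowered = d_loss < g_loss / 2.  # equivalently: g_loss > 2 * d_loss

Example #4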
def main():
    start_time = time.time()  # Clocking start

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # DCGAN model
        model = dcgan.DCGAN(s, batch_size=train_step['batch_size'])

        # Initializing variables
        s.run(tf.global_variables_initializer())

        # Load model & Graph & Weights
        saved_global_step = 0

        ckpt = tf.train.get_checkpoint_state('./model/')
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint
            model.saver.restore(s, ckpt.model_checkpoint_path)

            saved_global_step = int(
                ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
            print("[+] global step : %d" % saved_global_step,
                  " successfully loaded")
        else:
            print('[-] No checkpoint file found')

        # Training, Test data set
        # loading CelebA DataSet
        ds = DataSet(
            height=64,
            width=64,
            channel=3,
            ds_image_path="D:\\DataSet/CelebA/CelebA-64.h5",
            ds_label_path="D:\\DataSet/CelebA/Anno/list_attr_celeba.txt",
            # ds_image_path="D:\\DataSet/CelebA/Img/img_align_celeba/",
            ds_type="CelebA",
            use_save=False,
            save_file_name="D:\\DataSet/CelebA/CelebA-64.h5",
            save_type="to_h5",
            use_img_scale=False,
            # img_scale="-1,1"
        )

        # saving sample images
        test_images = np.reshape(iu.transform(ds.images[:16], inv_type='127'),
                                 (16, 64, 64, 3))
        iu.save_images(test_images,
                       size=[4, 4],
                       image_path=results['output'] + 'sample.png',
                       inv_type='127')

        ds_iter = DataIterator(x=ds.images,
                               y=None,
                               batch_size=train_step['batch_size'],
                               label_off=True)

        global_step = saved_global_step
        start_epoch = global_step // (len(ds.train_images) // model.batch_size
                                      )  # recover n_epoch
        ds_iter.pointer = saved_global_step % (
            len(ds.train_images) // model.batch_size)  # recover n_iter
        for epoch in range(start_epoch, train_step['epoch']):
            for batch_x in ds_iter.iterate():
                batch_x = np.reshape(iu.transform(batch_x, inv_type='127'),
                                     (model.batch_size, model.height,
                                      model.width, model.channel))
                batch_z = np.random.uniform(
                    -1., 1.,
                    [model.batch_size, model.z_dim]).astype(np.float32)

                # Update D network
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z
                                  })

                # Update G network
                _, g_loss = s.run([model.g_op, model.g_loss],
                                  feed_dict={
                                      model.x: batch_x,
                                      model.z: batch_z,
                                  })

                if global_step % train_step['logging_interval'] == 0:
                    d_loss, g_loss, summary = s.run(
                        [model.d_loss, model.g_loss, model.merged],
                        feed_dict={
                            model.x: batch_x,
                            model.z: batch_z,
                        })

                    # Print loss
                    print(
                        "[+] Epoch %03d Step %05d => " % (epoch, global_step),
                        " D loss : {:.8f}".format(d_loss),
                        " G loss : {:.8f}".format(g_loss))

                    # Generate sample images from fresh noise (evaluation only)
                    sample_z = np.random.uniform(
                        -1., 1., [model.sample_num, model.z_dim])
                    samples = s.run(model.g, feed_dict={
                        model.z: sample_z,
                    })

                    # Summary saver
                    model.writer.add_summary(summary, global_step)

                    # Export image generated by model G
                    sample_image_height = model.sample_size
                    sample_image_width = model.sample_size
                    sample_dir = results['output'] + 'train_{0}.png'.format(
                        global_step)

                    # Generated image save
                    iu.save_images(
                        samples,
                        size=[sample_image_height, sample_image_width],
                        image_path=sample_dir,
                        inv_type='127')

                    # Model save
                    model.saver.save(s, results['model'], global_step)

                global_step += 1

        end_time = time.time() - start_time  # Clocking end

        # Elapsed time
        print("[+] Elapsed time {:.8f}s".format(end_time))

        # The session is closed automatically when the with-block exits.
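
Example #4 normalizes inputs with iu.transform(..., inv_type='127') and saves with iu.save_images(..., inv_type='127'); presumably this maps raw uint8 pixels into the [-1, 1] range of the generator's tanh output and back. A sketch of that assumed mapping (the iu helpers themselves are not shown in this listing):

import numpy as np

def to_tanh_range(images):
    # Assumed forward transform: uint8 [0, 255] -> float32 [-1, 1].
    return images.astype(np.float32) / 127.5 - 1.0

def from_tanh_range(images):
    # Assumed inverse transform: float32 [-1, 1] -> uint8 [0, 255].
    return np.clip((images + 1.0) * 127.5, 0, 255).astype(np.uint8)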
Example #5
def main():
    start_time = time.time()  # Clocking start

    # MNIST Dataset load
    mnist = input_data.read_data_sets('../data/MNIST_data', one_hot=True)

    # GPU configure
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as s:
        # GAN Model
        model = dcgan.DCGAN(s)

        # Initializing
        s.run(tf.global_variables_initializer())

        sample_x, _ = mnist.train.next_batch(model.sample_num)
        sample_x = np.reshape(
            sample_x, [-1, model.input_height, model.input_width, model.channel])
        sample_z = np.random.uniform(
            -1., 1., [model.sample_num, model.z_dim]).astype(np.float32)

        d_overpowered = False
        for step in range(train_step['global_step']):
            batch_x, _ = mnist.train.next_batch(model.batch_size)
            batch_x = np.reshape(
                batch_x, [-1, model.input_height, model.input_width, model.channel])
            batch_x = batch_x * 2. - 1.  # rescale [0, 1] to [-1, 1] to match the tanh output

            batch_z = np.random.uniform(
                -1., 1., size=[model.batch_size, model.z_dim]).astype(np.float32)

            # Update D network
            if not d_overpowered:
                _, d_loss = s.run([model.d_op, model.d_loss],
                                  feed_dict={model.x: batch_x, model.z: batch_z})

            # Update G network
            _, g_loss = s.run([model.g_op, model.g_loss],
                              feed_dict={model.x: batch_x, model.z: batch_z})

            d_overpowered = d_loss < (g_loss / 2)  # d_loss is stale if the D update was skipped

            if step % train_step['logging_interval'] == 0:
                batch_x, _ = mnist.test.next_batch(model.batch_size)
                batch_x = np.reshape(
                    batch_x, [-1, model.input_height, model.input_width, model.channel])
                batch_x = batch_x * 2. - 1.
                batch_z = np.random.uniform(
                    -1., 1., [model.batch_size, model.z_dim]).astype(np.float32)

                d_loss, g_loss, summary = s.run(
                    [model.d_loss, model.g_loss, model.merged],
                    feed_dict={model.x: batch_x, model.z: batch_z})

                d_overpowered = d_loss < (g_loss / 2)

                # Print loss
                print("[+] Step %08d => " % step," D loss : {:.8f}".format(d_loss)," G loss : {:.8f}".format(g_loss))

                # Generate sample images from the fixed noise (evaluation only)
                samples = s.run(model.g,
                                feed_dict={model.x: sample_x, model.z: sample_z})

                samples = np.reshape(
                    samples, [-1, model.output_height, model.output_width, model.channel])

                # Summary saver
                model.writer.add_summary(summary, step)

                # Export image generated by model G
                sample_image_height = model.sample_size
                sample_image_width = model.sample_size
                sample_dir = results['output'] + 'train_{:08d}.png'.format(step)

                # Generated image save
                iu.save_images(samples,
                               size=[sample_image_height, sample_image_width],
                               image_path=sample_dir)

                # Model save
                model.saver.save(s, results['model'], global_step=step)

    end_time = time.time() - start_time  # Clocking end

    # Elapsed time
    print("[+] Elapsed time {:.8f}s".format(end_time))  # took about 370s on my machine

    # The session was already closed when the with-block exited.
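
All three training scripts reference module-level train_step and results dicts defined elsewhere; a plausible shape for them, with purely illustrative values:

# Illustrative configuration only; the real values live in each script's setup code.
train_step = {
    'epoch': 25,                # used by Examples #3 and #4
    'global_step': 200001,      # used by Example #5
    'batch_size': 64,
    'logging_interval': 1000,
}

results = {
    'output': './gen_img/',                # directory for sampled images
    'model': './model/DCGAN-model.ckpt',   # checkpoint prefix for saver.save
}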