Example #1
def main(args):

    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists('samples_progress'):
        os.makedirs('samples_progress')
    for i in range(8):
        if not os.path.exists('samples_progress/part{:1d}'.format(i + 1)):
            os.makedirs('samples_progress/part{:1d}'.format(i + 1))

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with open(args.settings_file_name, "a") as settings_file:
        for key, val in sorted(vars(args).items()):
            settings_file.write(key + ": " + str(val) + "\n")

    with open(args.progress_file_name, "a") as prog_file:
        prog_file.write("\n" + datetime.now().strftime("%H:%M:%S ") +
                        "Started\n")

    with tf.Session(config=run_config) as sess:
        dcgan = model.DCGAN(sess, args)

        if args.train:
            dcgan.train()

            with open(args.progress_file_name, 'a') as prog_file:
                prog_file.write("\n" + datetime.now().strftime("%H:%M:%S ") +
                                "Finished training.\n")
        else:
            if not dcgan.load(args.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # Below is code for visualization
        if args.vis_type == 0:
            vis_options = [6, 7, 9, 10]
            for option in vis_options:
                print("Visualizing option %s" % option)
                OPTION = option
                #utils.visualize(sess, dcgan, args, OPTION)
                utils.visualize(sess, dcgan, OPTION, save_input=True)
        else:
            OPTION = args.vis_type
            utils.visualize(sess, dcgan, OPTION)
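A minimal sketch of how main() above could be wired to a command line; these flag names simply mirror the attributes the function reads (checkpoint_dir, sample_dir, settings_file_name, progress_file_name, train, vis_type) and are assumptions, not the original project's CLI:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_dir', default='checkpoint')
    parser.add_argument('--sample_dir', default='samples')
    parser.add_argument('--settings_file_name', default='settings.txt')
    parser.add_argument('--progress_file_name', default='progress.txt')
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--vis_type', type=int, default=0)
    main(parser.parse_args())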
Example #2
def restore_model():
    sess = tf.Session(config=run_config)
    dcgan = model.DCGAN(sess,
                        input_width=FLAGS.input_width,
                        input_height=FLAGS.input_height,
                        output_height=FLAGS.output_height,
                        output_width=FLAGS.output_width,
                        batch_size=FLAGS.batch_size,
                        sample_num=FLAGS.batch_size,
                        dataset_name=FLAGS.dataset,
                        input_fname_pattern=FLAGS.input_fname_pattern,
                        crop=FLAGS.crop,
                        checkpoint_dir=FLAGS.checkpoint_dir,
                        sample_dir=FLAGS.sample_dir)

    dcgan.load(
        FLAGS.checkpoint_dir)  # at this point the model has been restored
    return dcgan, sess
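Note that restore_model() depends on module-level run_config, model, and FLAGS being defined; a hedged usage sketch:

# Hypothetical usage: recover the model once, run inference, then release the session.
dcgan, sess = restore_model()
# ... run sampling / inference with dcgan here ...
sess.close()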
Example #3
run_config.gpu_options.allow_growth = True

config = type("Foo", (object,), {})()  # quick anonymous object to hold settings
config.dataset = 'celebA'
config.batch_size = 1
seconds_per_random_sample = 4

#with tf.Session(config=run_config) as sess:
sess = tf.Session(config=run_config)
dcgan = model.DCGAN(
    sess,
    input_height=108,
    input_width=108,
    output_width=64,
    output_height=64,
    batch_size=config.batch_size,
    sample_num=64,
    dataset_name='celebA',
    input_fname_pattern='*.jpg',
    crop=True,  #true for training
    checkpoint_dir='checkpoint',
    sample_dir='samples')

if not dcgan.load('checkpoint')[0]:
    print('Cannot find checkpoint!')

utils.show_all_variables()
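The otherwise unused seconds_per_random_sample above suggests a periodic sampling loop. A hedged sketch follows, where dcgan.sampler, dcgan.z, and dcgan.z_dim follow the common carpedm20 DCGAN-tensorflow layout and are assumptions here:

import time

while True:
    # draw a fresh latent batch and run the generator (attribute names assumed)
    z = np.random.uniform(-1, 1, size=(config.batch_size, dcgan.z_dim))
    samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z})
    time.sleep(seconds_per_random_sample)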


def get_mask(file):
    image = imread(file)
Example #4
dataset = train_dataset.DataPipeLine(train_path, train=True, onehot=True)
# Unpack in the first step rather than inside map(), because map() may
# encounter shapes that cannot be stacked.
dataset = tf.data.Dataset.from_generator(dataset.generator,
                                         output_types=(tf.float32, tf.float32),
                                         output_shapes=((28, 28), (10,)))\
            .map(map_func, num_parallel_calls=num_threads)\
            .batch(BATCH_SIZE)\
            .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

# for A,B in dataset:
#     print(A.shape,B.shape)
#     for i in range(100):
#         print(A[i,:])
#     break


test_set = train_dataset.DataPipeLine(train_path, train=False, onehot=True)  # effectively unused: it merely stacks noise as generator input
test_set = tf.data.Dataset.from_generator(test_set.generator,
                                          output_types=(tf.float32, tf.float32),
                                          output_shapes=((28, 28), (10,)))\
            .map(map_func_z, num_parallel_calls=num_threads)\
            .batch(1)\
            .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
# for i,noise in enumerate(test_set):
#     print(i,noise.shape)

# Bind the instance to a new name so the `model` module is not shadowed.
gan = model.DCGAN(train_set=dataset,
                  test_set=test_set,
                  loss_name="LSGAN",
                  mixed_precision=True,
                  learning_rate=2e-4,
                  tmp_path=tmp_path,
                  out_path=out_path)
gan.build(input_shape_G=[None, 100], input_shape_D=[None, 28, 28, 1])
gan.test(take_nums=100)
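map_func and map_func_z are referenced above but not shown; hypothetical sketches, consistent with the shapes the model is built with (images (28, 28, 1) for D, 100-dim noise for G):

def map_func(img, label):
    # reshape to HWC and scale pixels to [-1, 1] (assumed preprocessing)
    img = tf.reshape(img, (28, 28, 1)) / 255.0 * 2.0 - 1.0
    return img, label

def map_func_z(img, label):
    # the test pipeline only needs noise as generator input
    return tf.random.normal(shape=(100,))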
Example #5
def main(_):
    # Define placeholder
    real_data = tf.placeholder(
        tf.float32, shape=[None, 64, 64, 3], name='real_data')
    generated_inputs = tf.placeholder(
        tf.float32, [None, 64], name='generated_inputs')

    # Create DCGAN model
    dcgan_model = model.DCGAN(is_training=True, final_size=64)
    outputs_dict = dcgan_model.dcgan_model(real_data, generated_inputs)
    generated_data = outputs_dict['generated_data']
    generated_data_ = tf.identity(generated_data, name='generated_data')
    discriminator_gen_outputs = outputs_dict['discriminator_gen_outputs']
    discriminator_real_outputs = outputs_dict['discriminator_real_outputs']
    generator_variables = outputs_dict['generator_variables']
    discriminator_variables = outputs_dict['discriminator_variables']
    loss_dict = dcgan_model.loss(discriminator_real_outputs,
                                 discriminator_gen_outputs)
    discriminator_loss = loss_dict['dis_loss']
    discriminator_loss_on_real = loss_dict['dis_loss_on_real']
    discriminator_loss_on_generated = loss_dict['dis_loss_on_generated']
    generator_loss = loss_dict['gen_loss']

    # Write loss values to logdir (tensorboard)
    tf.summary.scalar('discriminator_loss', discriminator_loss)
    tf.summary.scalar('discriminator_loss_on_real', discriminator_loss_on_real)
    tf.summary.scalar('discriminator_loss_on_generated',
                      discriminator_loss_on_generated)
    tf.summary.scalar('generator_loss', generator_loss)
    merged_summary = tf.summary.merge_all(key=tf.GraphKeys.SUMMARIES)

    # Create optimizer
    discriminator_optimizer = tf.train.AdamOptimizer(learning_rate=0.0004,  # 0.0005
                                                     beta1=0.5)
    discriminator_train_step = discriminator_optimizer.minimize(
        discriminator_loss, var_list=discriminator_variables)
    generator_optimizer = tf.train.AdamOptimizer(learning_rate=0.0001,
                                                 beta1=0.5)
    generator_train_step = generator_optimizer.minimize(
        generator_loss, var_list=generator_variables)

    saver = tf.train.Saver(var_list=tf.global_variables())

    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)

        # Write model graph to tensorboard
        if not FLAGS.logdir:
            raise ValueError('logdir is not specified.')
        if not os.path.exists(FLAGS.logdir):
            os.makedirs(FLAGS.logdir)
        writer = tf.summary.FileWriter(FLAGS.logdir, sess.graph)

        fixed_images, fixed_generated_inputs = get_next_batch()

        for i in range(FLAGS.num_steps):
            if (i + 1) % 500 == 0:
                batch_images = fixed_images
                batch_generated_inputs = fixed_generated_inputs
            else:
                batch_images, batch_generated_inputs = get_next_batch()
            train_dict = {real_data: batch_images,
                          generated_inputs: batch_generated_inputs}

            # Update discriminator network
            sess.run(discriminator_train_step, feed_dict=train_dict)

            # Update generator network five times per discriminator update
            for _ in range(5):
                sess.run(generator_train_step, feed_dict=train_dict)

            summary, generated_images = sess.run(
                [merged_summary, generated_data], feed_dict=train_dict)

            # Write loss values to tensorboard
            writer.add_summary(summary, i + 1)

            if (i + 1) % 500 == 0:
                # Save model
                model_save_path = os.path.join(FLAGS.logdir, 'model.ckpt')
                saver.save(sess, save_path=model_save_path, global_step=i + 1)

                # Save generated images
                if not FLAGS.generated_images_save_dir:
                    FLAGS.generated_images_save_dir = './generated_images'
                if not os.path.exists(FLAGS.generated_images_save_dir):
                    os.makedirs(FLAGS.generated_images_save_dir)
                write_images(
                    generated_images, FLAGS.generated_images_save_dir, i)

        writer.close()
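get_next_batch and write_images are assumed helpers in this example; hedged sketches consistent with the placeholder shapes above (images [N, 64, 64, 3], latent inputs [N, 64]), where load_real_images is a hypothetical stand-in for the project's data loader:

import os

import imageio
import numpy as np

def get_next_batch(batch_size=64):
    images = load_real_images(batch_size)  # hypothetical project-specific loader
    latents = np.random.uniform(-1.0, 1.0, size=(batch_size, 64))
    return images, latents

def write_images(images, save_dir, step):
    # map generated pixels from [-1, 1] back to [0, 255] and save each image
    for j, img in enumerate(images):
        path = os.path.join(save_dir, 'step{}_{}.png'.format(step + 1, j))
        imageio.imwrite(path, ((img + 1.0) * 127.5).astype(np.uint8))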
Example #6
def main(args):
    dcgan = model.DCGAN(DataDistribution(args.data_dir), args.batch_size,
                        args.epoch_size, args.learning_rate, args.decay_rate,
                        args.log_every, args.job_dir)
    dcgan.train()
Example #7
save_dir.mkdir(parents=True)

# Plot some training images
real_batch = next(iter(dataloader))
plt.figure(figsize=(8, 8))
plt.axis("off")
plt.title("Training Images")
plt.imshow(
    np.transpose(
        vutils.make_grid(real_batch[0].to(device)[:64],
                         padding=2,
                         normalize=True).cpu(), (1, 2, 0)))
plt.savefig(save_dir / "dataset.jpg")

import model
dcgan = model.DCGAN(ngpu, device, save_dir)
netG = dcgan.generator
netD = dcgan.discriminator
print(netG)
print(netD)

# Initialize BCELoss function
criterion = nn.BCELoss()

# Create batch of latent vectors that we will use to visualize
#  the progression of the generator
fixed_noise = torch.randn(64, model.nz, 1, 1, device=device)

# Establish convention for real and fake labels during training
real_label = 1.
fake_label = 0.
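For context, a hedged sketch of the standard DCGAN update this setup leads into, following the stock PyTorch DCGAN tutorial; the optimizer settings are the tutorial's defaults, not necessarily this project's:

optimizerD = torch.optim.Adam(netD.parameters(), lr=0.0002, betas=(0.5, 0.999))
optimizerG = torch.optim.Adam(netG.parameters(), lr=0.0002, betas=(0.5, 0.999))

for real, _ in dataloader:
    real = real.to(device)
    b_size = real.size(0)

    # Discriminator: maximize log(D(x)) + log(1 - D(G(z)))
    netD.zero_grad()
    label = torch.full((b_size,), real_label, device=device)
    errD_real = criterion(netD(real).view(-1), label)
    errD_real.backward()
    noise = torch.randn(b_size, model.nz, 1, 1, device=device)
    fake = netG(noise)
    label.fill_(fake_label)
    errD_fake = criterion(netD(fake.detach()).view(-1), label)
    errD_fake.backward()
    optimizerD.step()

    # Generator: maximize log(D(G(z)))
    netG.zero_grad()
    label.fill_(real_label)
    errG = criterion(netD(fake).view(-1), label)
    errG.backward()
    optimizerG.step()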
Example #8
import model
import mxnet as mx
# implementation

# dataset = "CIFAR10" or "MNIST"
result = model.DCGAN(epoch=1,
                     batch_size=128,
                     save_period=100,
                     load_period=100,
                     optimizer="adam",
                     beta1=0.5,
                     learning_rate=0.0002,
                     dataset="MNIST",
                     ctx=mx.gpu(0))
print("///" + result + "///")

def plot_generated(X):
    # Plot the fake images from the last epoch
    plt.figure(figsize=(10, 5))
    plt.axis("off")
    plt.title("Fake Images")
    plt.imshow(np.transpose(X, (1, 2, 0)))
    plt.show()


ngpu = 1
device = torch.device("cuda:0" if (
    torch.cuda.is_available() and ngpu > 0) else "cpu")
model.nc = 3
dcgan = model.DCGAN(ngpu, device)
dcgan.load(
    #'checkpoints/2021-05-25T09-16-41_FashionMNIST/dcgan_generator.chkpt','checkpoints/2021-05-25T09-16-41_FashionMNIST/dcgan_discriminator.chkpt'
    'checkpoints/2021-05-25T00-33-43_CelebFaces/dcgan_generator.chkpt',
    'checkpoints/2021-05-25T00-33-43_CelebFaces/dcgan_discriminator.chkpt')

# generate points in latent space
pts = generate_latent_points(model.nz, 2)
# interpolate points in latent space
interpolated = interpolate_points(pts[0], pts[1])
# generate images from the interpolated latent points
noise = interpolated
X = dcgan.generate(noise)
# plot the result
plot_generated(vutils.make_grid(X, padding=2, normalize=True).cpu())
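generate_latent_points and interpolate_points are not shown in this example; hedged sketches, consistent with the shapes used above (two latent points, 100 interpolation steps):

def generate_latent_points(nz, n_samples):
    # sample latent points from the standard normal distribution
    return torch.randn(n_samples, nz, 1, 1, device=device)

def interpolate_points(p1, p2, n_steps=100):
    # linear interpolation between two latent points
    ratios = torch.linspace(0.0, 1.0, steps=n_steps, device=device)
    return torch.stack([(1.0 - r) * p1 + r * p2 for r in ratios])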
Example #10
print(np.array(x).shape)
x_train = x[:-100]
x_test = x[-100:]

x_train = np.array(x_train)
x_train = x_train.reshape(x_train.shape[0], 14, 1)

x_test = np.array(x_test)
x_test = x_test.reshape(x_test.shape[0], 14, 1)

print("x_train's shape : ", np.array(x_train).shape)
x_test = np.array(data_resize(x_test))
print("x_test's shape : ", np.array(x_test).shape)

dcgan = model.DCGAN(x_train=x_train, x_test=x_test)
model_name = 'DCGAN_mnist_model'
# Prepare the model saving directory.
save_dir = os.path.join(os.getcwd(), 'saved_models')

train_steps = 230000
timer = model.ElapsedTimer()
dcgan.train(train_steps=train_steps,
            epoch=400,
            batch_size=255,
            save_interval=500,
            predict_interval=1000)
timer.elapsed_time()
# if not os.path.isdir(save_dir):
#     os.makedirs(save_dir)
# dcgan.generator().save_weights(os.path.join(save_dir, 'generator'.format(train_steps)))
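A hedged sketch of the weight-saving step the commented lines above hint at; dcgan.generator() returning a Keras model with save_weights is an assumption, and the filename placeholder fixes the no-op 'generator'.format(train_steps) in the original comment:

if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
dcgan.generator().save_weights(
    os.path.join(save_dir, 'generator_{}'.format(train_steps)))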