Example #1
def train(restore, is_master=True):
    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
    with strategy.scope():
        encoders = get_encoders()
        dataset = get_dataset(encoders)
        train_data = dataset.batch(config.BATCH_SIZE)

        _, generator = get_generator(encoders)

        checkpoint_path = path.join(config.CHECKPOINT_DIR, "keras",
                                    "generator.ckpt")
        if restore:
            generator.load_weights(checkpoint_path)

    callbacks = []
    if is_master:
        generator.summary()
        stats_filename = datetime.now().strftime("%Y%m%d_%H%M") + ".csv"
        callbacks = [
            K.callbacks.CSVLogger(
                path.join(config.LOG_DIR, "stats", stats_filename)),
            # K.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True),
            EvaluationLogger(generator, dataset, encoders)
        ]
    initial_epoch = (generator.optimizer.iterations.numpy()
                     // config.STEPS_PER_EPOCH)
    generator.fit(train_data,
                  epochs=config.NUM_EPOCHS,
                  initial_epoch=initial_epoch,
                  steps_per_epoch=config.STEPS_PER_EPOCH,
                  callbacks=callbacks)
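A note on running Example #1: tf.distribute.experimental.MultiWorkerMirroredStrategy reads the cluster topology from the TF_CONFIG environment variable. A minimal launch sketch (hypothetical hosts and ports; is_master corresponds to worker index 0):

import json
import os

# Hypothetical two-worker cluster; set "index" to 1 on the second machine.
os.environ["TF_CONFIG"] = json.dumps({
    "cluster": {"worker": ["host0:12345", "host1:12345"]},
    "task": {"type": "worker", "index": 0},
})

train(restore=False, is_master=True)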
Example #2
def define_objective(charmap, real_inputs_discrete, seq_length, gan_type="wgan", rnn_cell=None):
    assert gan_type in ["wgan", "fgan", "cgan"]
    assert rnn_cell
    other_ops = {}
    real_inputs = tf.one_hot(real_inputs_discrete, len(charmap))
    Generator = get_generator(FLAGS.GENERATOR_MODEL)
    Discriminator = get_discriminator(FLAGS.DISCRIMINATOR_MODEL)
    train_pred, inference_op = Generator(BATCH_SIZE, len(charmap), seq_len=seq_length, gt=real_inputs, rnn_cell=rnn_cell)

    real_inputs_substrings = get_substrings_from_gt(real_inputs, seq_length, len(charmap))

    disc_real = Discriminator(real_inputs_substrings, len(charmap), seq_length, reuse=False,
        rnn_cell=rnn_cell)
    disc_fake = Discriminator(train_pred, len(charmap), seq_length, reuse=True,
        rnn_cell=rnn_cell)
    disc_on_inference = Discriminator(inference_op, len(charmap), seq_length, reuse=True,
        rnn_cell=rnn_cell)


    if gan_type == "wgan":
        disc_cost, gen_cost = loss_d_g(disc_fake, disc_real, train_pred, real_inputs_substrings, charmap, seq_length, Discriminator, rnn_cell)
    elif gan_type == "fgan":
        fgan = FisherGAN()
        disc_cost, gen_cost = fgan.loss_d_g(disc_fake, disc_real, train_pred, real_inputs_substrings, charmap, seq_length, Discriminator)
        other_ops["alpha_optimizer_op"] = fgan.alpha_optimizer_op
    else:
        raise NotImplementedError("Cramer GAN not implemented")

    return disc_cost, gen_cost, train_pred, disc_fake, disc_real, disc_on_inference, inference_op, other_ops
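A hypothetical call site for Example #2 (TF1-era code; charmap and BATCH_SIZE are assumed to exist in the project, and LSTMCell is just one valid choice of rnn_cell):

seq_length = 32  # placeholder
rnn_cell = tf.nn.rnn_cell.LSTMCell  # forwarded to Generator/Discriminator
real_inputs_discrete = tf.placeholder(tf.int32, [BATCH_SIZE, seq_length])
(disc_cost, gen_cost, train_pred, disc_fake, disc_real,
 disc_on_inference, inference_op, other_ops) = define_objective(
     charmap, real_inputs_discrete, seq_length,
     gan_type="wgan", rnn_cell=rnn_cell)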
Example #3
def train():
    images, images_path = get_celebA(flags.output_size, flags.n_epoch,
                                     flags.batch_size)
    G = get_generator([None, flags.z_dim])
    D = get_discriminator(
        [None, flags.output_size, flags.output_size, flags.c_dim])

    G.train()
    D.train()

    d_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)
    g_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)

    n_step_epoch = int(len(images_path) // flags.batch_size)

    for epoch in range(flags.n_epoch):
        for step, batch_images in enumerate(images):
            if batch_images.shape[0] != flags.batch_size:
                break

            step_time = time.time()
            with tf.GradientTape(persistent=True) as tape:
                z = np.random.normal(loc=0.0,
                                     scale=1.0,
                                     size=[flags.batch_size,
                                           flags.z_dim]).astype(np.float32)
                d_logits = D(G(z))
                d2_logits = D(batch_images)
                # discriminator: real images are labelled as 1
                d_loss_real = tl.cost.sigmoid_cross_entropy(
                    d2_logits, tf.ones_like(d2_logits), name='dreal')
                # discriminator: images from generator (fake) are labelled as 0
                d_loss_fake = tl.cost.sigmoid_cross_entropy(
                    d_logits, tf.zeros_like(d_logits), name='dfake')
                # combined loss for updating discriminator
                d_loss = d_loss_real + d_loss_fake
                # generator: try to fool discriminator to output 1
                g_loss = tl.cost.sigmoid_cross_entropy(d_logits,
                                                       tf.ones_like(d_logits),
                                                       name='gfake')

            grad = tape.gradient(g_loss, G.trainable_weights)
            g_optimizer.apply_gradients(zip(grad, G.trainable_weights))
            grad = tape.gradient(d_loss, D.trainable_weights)
            d_optimizer.apply_gradients(zip(grad, D.trainable_weights))
            del tape

            if step % flags.print_every_step == 0:
                print("Epoch: [{}/{}] [{}/{}] took: {:.3f}, d_loss: {:.5f}, g_loss: {:.5f}".format(
                    epoch, flags.n_epoch, step, n_step_epoch,
                    time.time() - step_time, d_loss, g_loss))

        if np.mod(epoch, flags.save_every_epoch) == 0:
            G.save_weights('{}/G_{}.h5'.format(flags.checkpoint_dir, epoch))
            D.save_weights('{}/D_{}.h5'.format(flags.checkpoint_dir, epoch))
            G.eval()
            result = G(z)
            G.train()
            tl.visualize.save_images(
                result.numpy(), [num_tiles, num_tiles],
                '{}/train_{:02d}.png'.format(flags.sample_dir, epoch))
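Example #3 relies on a module-level flags object and num_tiles; a sketch of the assumed configuration (only the field names are dictated by the code above, the values are placeholders):

from types import SimpleNamespace

flags = SimpleNamespace(
    output_size=64, c_dim=3, z_dim=100,      # image and latent sizes
    n_epoch=25, batch_size=64,               # training schedule
    lr=2e-4, beta1=0.5,                      # Adam hyperparameters
    print_every_step=50, save_every_epoch=1,
    checkpoint_dir="checkpoint", sample_dir="samples",
)
num_tiles = 8  # sample grids are saved as num_tiles x num_tiles images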
Example #4
def main():
    parser = argparse.ArgumentParser(description='Generate SR images')
    parser.add_argument('--arc', required=True, type=str, help='Model architecture')
    parser.add_argument('--model_path', required=True, type=str, help='Path to a model')
    parser.add_argument('--lr_dir', type=str, default=None, help='Path to lr images')
    parser.add_argument('--lr_path', type=str, default=None, help='Path to a lr image')
    parser.add_argument('--ext', type=str, help='Image extension')
    parser.add_argument('--default', action='store_true', help='Run on the default test sets (Set5, Set14, BSDS100)')
    parser.add_argument('--save_dir', type=str, help='folder to save SR images')
    parser.add_argument('--cuda', type=str, default=None, help='a list of gpus')
    args = parser.parse_args()

    if args.cuda is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda

    global sess
    sess = tf.Session()
    model = get_generator(args.arc, is_train=False)
    print("** Loading model at: " + args.model_path)
    model.load_weights(args.model_path)

    if args.default:
        lr_dirs = [os.path.join("./data/test/", dataset, "LR") for dataset in ["Set5", "Set14", "BSDS100"]]
        save_dirs = [os.path.join("./output/", args.arc, dataset) for dataset in ["Set5", "Set14", "BSDS100"]]
        for lr_dir, save_dir in zip(lr_dirs, save_dirs):
            sr_from_folder(model, lr_dir, save_dir, ".png")
    else:
        if args.lr_dir is not None:
            sr_from_folder(model, args.lr_dir, args.save_dir, args.ext)
        if args.lr_path is not None:
            sr_from_path(model, args.lr_path, args.save_dir)
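Assuming Example #4 is saved as generate.py (a hypothetical name), typical invocations would be:

# Run on the three built-in benchmark sets:
#   python generate.py --arc srgan --model_path weights/g.h5 --default
# Run on a custom folder of low-resolution images:
#   python generate.py --arc srgan --model_path weights/g.h5 \
#       --lr_dir ./my_lr --save_dir ./out --ext .png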
Example #5
def define_objective(charmap, real_inputs_discrete, seq_length):
    real_inputs = tf.one_hot(real_inputs_discrete, len(charmap))
    Generator = get_generator(FLAGS.GENERATOR_MODEL)
    Discriminator = get_discriminator(FLAGS.DISCRIMINATOR_MODEL)
    train_pred, inference_op = Generator(BATCH_SIZE,
                                         len(charmap),
                                         seq_len=seq_length,
                                         gt=real_inputs)

    real_inputs_substrings = get_substrings_from_gt(real_inputs, seq_length,
                                                    len(charmap))

    disc_real = Discriminator(real_inputs_substrings,
                              len(charmap),
                              seq_length,
                              reuse=False)
    disc_fake = Discriminator(train_pred, len(charmap), seq_length, reuse=True)
    disc_on_inference = Discriminator(inference_op,
                                      len(charmap),
                                      seq_length,
                                      reuse=True)

    disc_cost, gen_cost = loss_d_g(disc_fake, disc_real, train_pred,
                                   real_inputs_substrings, charmap, seq_length,
                                   Discriminator)
    return disc_cost, gen_cost, train_pred, disc_fake, disc_real, disc_on_inference, inference_op
Example #6
    def build_model(self):
        self.G = get_generator(self.g_conv_dim, self.n_labels,
                               self.g_repeat_num, self.image_size)
        self.D = get_discriminator(self.d_conv_dim, self.n_labels,
                                   self.d_repeat_num, self.image_size)

        self.G.summary()  # summary() already prints; wrapping it in print() adds a stray "None"

        self.d_optimizer = keras.optimizers.Adam(lr=self.d_lr,
                                                 beta_1=self.beta_1,
                                                 beta_2=self.beta_2)
        self.g_optimizer = keras.optimizers.Adam(lr=self.g_lr,
                                                 beta_1=self.beta_1,
                                                 beta_2=self.beta_2)

        self.D.trainable = False

        combined_real_img = Input(shape=(self.image_size, self.image_size, 3))
        input_orig_labels = Input(shape=(self.image_size, self.image_size,
                                         self.n_labels))
        input_target_labels = Input(shape=(self.image_size, self.image_size,
                                           self.n_labels))

        concatted_input = Concatenate(axis=3)(
            [combined_real_img, input_target_labels])

        combined_fake_img = self.G(concatted_input)
        output_src, output_cls = self.D(combined_fake_img)
        concatted_combined_fake_img = Concatenate(axis=3)(
            [combined_fake_img, input_orig_labels])
        reconstr_img = self.G(concatted_combined_fake_img)

        self.combined = Model(
            inputs=[combined_real_img, input_orig_labels, input_target_labels],
            outputs=[reconstr_img, output_src, output_cls])

        self.combined.compile(
            loss=["mae", neg_mean_loss, self.custom_bin],
            loss_weights=[self.lambda_rec, 1, self.lambda_cls],
            optimizer=self.g_optimizer)

        shape = (self.image_size, self.image_size, 3)
        fake_input, real_input, interpolation = Input(shape), Input(
            shape), Input(shape)
        norm = GradNorm()([self.D(interpolation)[0], interpolation])
        fake_output_src, fake_output_cls = self.D(fake_input)
        real_output_src, real_output_cls = self.D(real_input)
        self.DIS = Model(
            [real_input, fake_input, interpolation],
            [fake_output_src, real_output_src, real_output_cls, norm])
        # self.DIS = Model([gen_input], output_D)

        self.D.trainable = True

        self.DIS.compile(
            loss=[mean_loss, neg_mean_loss, self.custom_bin, 'mse'],
            loss_weights=[1, 1, self.lambda_cls, self.lambda_gp],
            optimizer=self.d_optimizer)
Example #7
    def __init__(self, args):
        super(DeepFillV2, self).__init__()
        self.hparams = args
        self.net_G = get_generator(args)
        self.net_D = InpaintSADiscriminator(args.input_nc)
        print('#Params Generator: ', f'{count_parameters(self.net_G) / 1e6}M')
        print('#Params Discriminator: ', f'{count_parameters(self.net_D) / 1e6}M')
        self.recon_loss = ReconstructionLoss(args.l1_c_h, args.l1_c_nh,
                                             args.l1_r_h, args.l1_r_nh)
        self.refined_as_discriminator_input = args.refined_as_discriminator_input
        self.visualization_dataloader = self.setup_dataloader_for_visualizations()
Example #8
def main(args):
    if args.g >= 0 and torch.cuda.is_available():
        device = torch.device(f"cuda:{args.g:d}")
        print(f"GPU mode: {args.g:d}")
    else:
        device = torch.device("cpu")
        print("CPU mode")

    result_dir = Path(args.result_dir)

    # Fetch the MNIST training data
    mnist_train = MNIST(root=".",
                        download=True,
                        train=True,
                        transform=lambda x: np.expand_dims(
                            np.asarray(x, dtype=np.float32), 0) / 255)
    mnist_loader = DataLoader(mnist_train, args.batchsize)
    mnist_loader = InfiniteDataLoader(mnist_loader)

    generator = get_generator(Z_DIM).to(device)
    critic = get_critic().to(device)

    opt_g = Adam(generator.parameters(), args.alpha, (args.beta1, args.beta2))
    opt_c = Adam(critic.parameters(), args.alpha, (args.beta1, args.beta2))

    trainer = Engine(
        WGANTrainer(mnist_loader, generator, critic, opt_g, opt_c, args.n_cri,
                    args.gp_lam, device))

    log_dict = {}
    accumulator = MetricsAccumulator(["generator_loss", "critic_loss"])
    trainer.add_event_handler(Events.ITERATION_COMPLETED, accumulator)
    trainer.add_event_handler(Events.ITERATION_COMPLETED(every=500),
                              record_metrics(log_dict, accumulator))
    trainer.add_event_handler(Events.ITERATION_COMPLETED(every=500),
                              print_metrics(log_dict, accumulator.keys))
    trainer.add_event_handler(
        Events.ITERATION_COMPLETED(every=500),
        plot_metrics(log_dict, "iteration", accumulator.keys,
                     result_dir / "metrics.pdf"))
    trainer.add_event_handler(
        Events.ITERATION_COMPLETED(every=500),
        save_img(generator, result_dir / "generated_samples", device))

    # Terminate after the specified number of iterations
    trainer.add_event_handler(Events.ITERATION_COMPLETED(once=args.iteration),
                              lambda engine: engine.terminate())

    trainer.run(mnist_loader, max_epochs=10**10)
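InfiniteDataLoader is not shown in Example #8; a minimal sketch consistent with how it is used above (re-iterating a finite DataLoader forever so trainer.run never exhausts it) could be:

class InfiniteDataLoader:
    """Wraps a finite DataLoader and yields batches endlessly (a sketch; the real class may differ)."""

    def __init__(self, loader):
        self.loader = loader

    def __iter__(self):
        while True:
            yield from self.loader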
Example #9
def get_generator(model_path):
    print("GPU:", torch.cuda.is_available())

    # `args` and `device` are module-level globals in the original script.
    generator, optimizer_G, scheduler_G = model.get_generator(args)
    generator.to(device)

    if not torch.cuda.is_available():
        checkpoint = torch.load(model_path, map_location=torch.device('cpu'))
    else:
        checkpoint = torch.load(model_path)

    epoch = checkpoint['epoch']
    generator.load_state_dict(checkpoint['gen_state_dict'])

    return generator
Example #10
def prepare_model(**params):
    model_arc = params['arc']
    model = get_generator(model_arc)

    if model_arc == 'srfeat' or model_arc == 'srgan':
        loss = mean_squared_error
    else:
        loss = mean_absolute_error

    model = load_model(model, params['resume'])
    gpu_model = make_gpu_model(model, params['n_gpus'])
    optimizer = Adam(lr=params['lr_init'])
    gpu_model.compile(optimizer=optimizer, loss=loss, metrics=[psnr])

    return model, gpu_model
Example #11
def train():
    z = tf.contrib.distributions.Normal(0., 1.).sample([FLAGS.batch_size, FLAGS.z_dim]) #tf.placeholder(tf.float32, [None, z_dim], name='z_noise')
    ds, images_path = get_celebA(FLAGS.output_size, FLAGS.n_epoch, FLAGS.batch_size)
    iterator = ds.make_one_shot_iterator()
    images = iterator.get_next()

    G = get_generator([None, FLAGS.z_dim])
    D = get_discriminator([None, FLAGS.output_size, FLAGS.output_size, FLAGS.c_dim])

    G.train()
    D.train()
    fake_images = G(z)
    d_logits = D(fake_images)
    d2_logits = D(images)

    # discriminator: real images are labelled as 1
    d_loss_real = tl.cost.sigmoid_cross_entropy(d2_logits, tf.ones_like(d2_logits), name='dreal')
    # discriminator: images from generator (fake) are labelled as 0
    d_loss_fake = tl.cost.sigmoid_cross_entropy(d_logits, tf.zeros_like(d_logits), name='dfake')
    # cost for updating discriminator
    d_loss = d_loss_real + d_loss_fake

    # generator: try to make the fake images look real (1)
    g_loss = tl.cost.sigmoid_cross_entropy(d_logits, tf.ones_like(d_logits), name='gfake')
    # Define optimizers for updating discriminator and generator
    d_optim = tf.train.AdamOptimizer(FLAGS.learning_rate, beta1=FLAGS.beta1) \
                      .minimize(d_loss, var_list=D.weights)
    g_optim = tf.train.AdamOptimizer(FLAGS.learning_rate, beta1=FLAGS.beta1) \
                      .minimize(g_loss, var_list=G.weights)

    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    n_step_epoch = int(len(images_path) // FLAGS.batch_size)
    for epoch in range(FLAGS.n_epoch):
        epoch_time = time.time()
        for step in range(n_step_epoch):
            step_time = time.time()
            _d_loss, _g_loss, _, _ = sess.run([d_loss, g_loss, d_optim, g_optim])
            print("Epoch: [{}/{}] [{}/{}] took: {:3f}, d_loss: {:5f}, g_loss: {:5f}".format(epoch, FLAGS.n_epoch, step, n_step_epoch, time.time()-step_time, _d_loss, _g_loss))
            if np.mod(step, FLAGS.save_step) == 0:
                G.save_weights('{}/G.npz'.format(FLAGS.checkpoint_dir), sess=sess, format='npz')
                D.save_weights('{}/D.npz'.format(FLAGS.checkpoint_dir), sess=sess, format='npz')
                result = sess.run(fake_images)
                tl.visualize.save_images(result, [num_tiles, num_tiles], '{}/train_{:02d}_{:04d}.png'.format(FLAGS.sample_dir, epoch, step))

    sess.close()
Example #12
    def __init__(self, args):
        super(DeepFillV2, self).__init__()
        self.hparams = args

        self.net_G = get_generator(args)
        self.net_D = InpaintSADiscriminator(args.input_nc)

        if args.load_G:
            self.net_G.load_state_dict(torch.load(args.load_G), strict=False)
        if args.load_D:
            self.net_D.load_state_dict(torch.load(args.load_D), strict=False)

        self.recon_loss = ReconstructionLoss(args.l1_c_h, args.l1_c_nh,
                                             args.l1_r_h, args.l1_r_nh)
        if args.vgg_weight > 0:
            self.vgg_loss = PerceptionLoss()
        self.refined_as_discriminator_input = args.refined_as_discriminator_input
Example #13
def test(weights_pathG, weights_pathE, real_path='real_image.png', reproduced_path='reproduced_image.png'):
    images, images_path = get_celebA(flags.output_size, flags.n_epoch, flags.batch_size)
    num_tiles = int(math.ceil(math.sqrt(flags.sample_size)))
    G = get_generator([None, flags.z_dim])
    G.load_weights(weights_pathG, format='npz')
    G.eval()
    E = get_encoder([None, flags.output_size, flags.output_size, flags.c_dim])
    E.load_weights(weights_pathE, format='npz')
    E.eval()

    for step, batch_images in enumerate(images):
        if batch_images.shape[0] != flags.batch_size:
            break
        result = G(E(batch_images))
        tl.visualize.save_images(batch_images.numpy(), [num_tiles, num_tiles], real_path)
        tl.visualize.save_images(result.numpy(), [num_tiles, num_tiles], reproduced_path)
        break
Example #14
def train():
    images, images_path = get_celebA(flags.output_size, flags.n_epoch,
                                     flags.batch_size)
    G = get_generator()
    E = get_encoder()
    G.load_weights(G_weights)
    G.train()
    E.train()
    optimizer = tf.optimizers.Adam(learning_rate, beta_1=flags.beta1)

    n_step_epoch = int(len(images_path) // flags.batch_size)

    for epoch in range(n_epoch):
        for step, batch_images in enumerate(images):
            if batch_images.shape[0] != flags.batch_size:
                break

            step_time = time.time()
            with tf.GradientTape() as tape:
                z = np.random.normal(loc=0.0,
                                     scale=1.0,
                                     size=[batch_size,
                                           flags.z_dim]).astype(np.float32)
                gen = G(z)
                z_encode = E(gen)

                x_encode = E(batch_images)
                x_decode = G(x_encode)

                z_recon_loss = tl.cost.absolute_difference_error(z_encode,
                                                                 z,
                                                                 is_mean=True)
                x_recon_loss = 5. * tl.cost.absolute_difference_error(
                    x_decode, batch_images, is_mean=True)
                loss = z_recon_loss + x_recon_loss

            grad = tape.gradient(loss, E.trainable_weights)
            optimizer.apply_gradients(zip(grad, E.trainable_weights))

            if step % print_every_step == 0:
                print('Epoch: [{}/{}] step: [{}/{}] took: {:.3f}, z_recon_loss: {:.5f}, x_recon_loss: {:.5f}'.format(
                    epoch, n_epoch, step, n_step_epoch,
                    time.time() - step_time, z_recon_loss, x_recon_loss))

        if epoch % save_every_epoch == 0:
            E.save_weights('{}/E_{}.h5'.format(flags.checkpoint_dir, epoch))
Example #15
def prepare_model(**params):
    print("** Load initial generator at: " + params['g_init'])
    start = time.time()
    g = get_generator(params['arc'], is_train=False)
    g.load_weights(params['g_init'])
    print("Finish loading generator in %.2fs" % (time.time() - start))

    img_d = image_discriminator()
    img_d.compile(loss=binary_crossentropy,
                  loss_weights=[params['per_loss_w']],
                  optimizer=Adam(lr=params['lr_init']))
    img_lr_scheduler = make_lr_callback(params['lr_init'], params['lr_decay'],
                                        params['lr_decay_at_steps'])
    img_lr_scheduler.set_model(img_d)

    f_d = feature_discriminator()
    f_d.compile(loss='binary_crossentropy',
                loss_weights=[params['per_loss_w']],
                optimizer=Adam(lr=params['lr_init']))
    f_lr_scheduler = make_lr_callback(params['lr_init'], params['lr_decay'],
                                      params['lr_decay_at_steps'])
    f_lr_scheduler.set_model(f_d)

    d_g = discriminator_generator(g, img_d, f_d)
    d_g.compile(
        loss=[content_loss, 'binary_crossentropy', 'binary_crossentropy'],
        loss_weights=[1.0, params['per_loss_w'], params['per_loss_w']],
        optimizer=Adam(lr=params['lr_init']))
    d_g_lr_scheduler = make_lr_callback(params['lr_init'], params['lr_decay'],
                                        params['lr_decay_at_steps'])
    d_g_lr_scheduler.set_model(d_g)

    def on_epoch_begin(epoch):
        d_g_lr_scheduler.on_epoch_begin(epoch)
        img_lr_scheduler.on_epoch_begin(epoch)
        f_lr_scheduler.on_epoch_begin(epoch)

    def on_epoch_end(epoch):
        d_g_lr_scheduler.on_epoch_end(epoch)
        img_lr_scheduler.on_epoch_end(epoch)
        f_lr_scheduler.on_epoch_end(epoch)

    return g, img_d, f_d, d_g, on_epoch_begin, on_epoch_end
Example #16
def define_class_objective(charmap, real_inputs_discrete, real_class_discrete,
                           seq_length, num_classes):
    real_inputs = tf.one_hot(real_inputs_discrete, len(charmap))
    Generator = get_generator(FLAGS.GENERATOR_MODEL)
    Discriminator = get_discriminator(FLAGS.DISCRIMINATOR_MODEL)
    train_pred, train_pred_class, inference_op = Generator(
        BATCH_SIZE,
        len(charmap),
        seq_len=seq_length,
        num_classes=num_classes,
        gt=real_inputs,
        gt_class=real_class_discrete)

    real_inputs_substrings, real_inputs_class = get_substrings_from_gt(
        real_inputs, real_class_discrete, seq_length, len(charmap))

    disc_real, disc_real_class = Discriminator(real_inputs_substrings,
                                               len(charmap),
                                               seq_length,
                                               num_classes,
                                               reuse=False)
    disc_fake, disc_fake_class = Discriminator(train_pred,
                                               len(charmap),
                                               seq_length,
                                               num_classes,
                                               reuse=True)
    disc_on_inference, disc_on_inference_class = Discriminator(inference_op,
                                                               len(charmap),
                                                               seq_length,
                                                               num_classes,
                                                               reuse=True)

    disc_cost, gen_cost = loss_d_g_class(disc_fake=disc_fake,
                                         disc_fake_class=disc_fake_class,
                                         disc_real=disc_real,
                                         disc_real_class=disc_real_class,
                                         gt_fake_class=train_pred_class,
                                         gt_real_class=real_inputs_class,
                                         num_classes=num_classes)

    return disc_cost, gen_cost, train_pred, disc_fake, disc_fake_class, \
           disc_real, disc_real_class, disc_on_inference, disc_on_inference_class, inference_op
Example #17
    def __init__(self, flags, type):
        self.dataset, self.len_instance = get_mnist(flags.batch_size)
        self.G = get_generator([None, flags.z_dim],
                               gf_dim=64,
                               o_size=flags.output_size,
                               o_channel=flags.c_dim)
        self.D = get_discriminator(
            [None, flags.output_size, flags.output_size, flags.c_dim],
            df_dim=64)
        self.batch_size = flags.batch_size
        self.epoch = flags.n_epoch
        self.type = type
        assert type in methods_dict.keys()
        self.get_loss = methods_dict[type]
        if type == "WGAN":
            self.d_optimizer = tf.optimizers.RMSprop(flags.lr)
            self.g_optimizer = tf.optimizers.RMSprop(flags.lr)
        else:
            self.d_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)
            self.g_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)
Example #18
def train():
    encoders = get_encoders()

    generator = get_generator(encoders)

    checkpoint_dir = path.join(config.CHECKPOINT_DIR, "tf")
    estimator = K.estimator.model_to_estimator(
        keras_model=generator,
        model_dir=checkpoint_dir,
        checkpoint_format="saver"
    )  # TODO: use 'checkpoint' once object-based checkpoints supported

    def input_fn():
        dataset = get_dataset(encoders)
        return dataset.batch(config.BATCH_SIZE)

    train_spec = E.TrainSpec(input_fn=input_fn)
    eval_spec = E.EvalSpec(
        input_fn=input_fn,
        hooks=[E.CheckpointSaverHook(checkpoint_dir, save_steps=1000)])

    E.train_and_evaluate(estimator, train_spec, eval_spec)
Example #19
def train(restore):
    encoders = get_encoders()
    dataset = get_dataset(encoders)

    text_rnn, generator = get_generator(encoders)

    checkpoint_path = path.join(config.CHECKPOINT_DIR, "keras",
                                "generator.ckpt")
    if restore:
        generator.load_weights(checkpoint_path)

    stats_filename = datetime.now().strftime('%Y%m%d_%H%M') + ".csv"
    callbacks = [
        # K.callbacks.TensorBoard(path.join(config.LOG_DIR, "tf_boards")),
        K.callbacks.CSVLogger(
            path.join(config.LOG_DIR, "stats", stats_filename)),
        K.callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                    save_weights_only=True),
        EvaluationLogger(generator, dataset, encoders)
    ]

    # https://github.com/keras-team/keras/issues/1872#issuecomment-572606922
    initial_epoch = (generator.optimizer.iterations.numpy()
                     // config.STEPS_PER_EPOCH)
    train_data = dataset.batch(config.BATCH_SIZE).take(config.STEPS_PER_EPOCH)
    # val_data = dataset.batch(config.BATCH_SIZE).take(8)
    generator.fit(
        train_data,
        epochs=config.NUM_EPOCHS,
        initial_epoch=initial_epoch,
        # validation_data=val_data,
        callbacks=callbacks)

    checkpoint_path = path.join(config.CHECKPOINT_DIR, "keras",
                                "text_rnn.ckpt")
    text_rnn.save_weights(checkpoint_path)
Example #20
def train():
    images, images_path = get_celebA(flags.output_size, flags.n_epoch,
                                     flags.batch_size)
    G = get_generator([None, flags.z_dim])
    D = get_discriminator(
        [None, flags.output_size, flags.output_size, flags.c_dim])

    G.train()
    D.train()

    d_optimizer = tf.optimizers.Adam(flags.learning_rate, beta_1=flags.beta1)
    g_optimizer = tf.optimizers.Adam(flags.learning_rate, beta_1=flags.beta1)

    n_step_epoch = int(len(images_path) // flags.batch_size)

    for step, batch_images in enumerate(images):
        step_time = time.time()
        with tf.GradientTape(persistent=True) as tape:
            # z = tf.distributions.Normal(0., 1.).sample([flags.batch_size, flags.z_dim]) #tf.placeholder(tf.float32, [None, z_dim], name='z_noise')
            z = np.random.normal(loc=0.0,
                                 scale=1.0,
                                 size=[flags.batch_size,
                                       flags.z_dim]).astype(np.float32)
            d_logits = D(G(z))
            d2_logits = D(batch_images)
            # discriminator: real images are labelled as 1
            d_loss_real = tl.cost.sigmoid_cross_entropy(
                d2_logits, tf.ones_like(d2_logits), name='dreal')
            # discriminator: images from generator (fake) are labelled as 0
            d_loss_fake = tl.cost.sigmoid_cross_entropy(
                d_logits, tf.zeros_like(d_logits), name='dfake')
            # combined loss for updating discriminator
            d_loss = d_loss_real + d_loss_fake
            # generator: try to fool discriminator to output 1
            g_loss = tl.cost.sigmoid_cross_entropy(d_logits,
                                                   tf.ones_like(d_logits),
                                                   name='gfake')

        grad = tape.gradient(g_loss, G.weights)
        g_optimizer.apply_gradients(zip(grad, G.weights))
        grad = tape.gradient(d_loss, D.weights)
        d_optimizer.apply_gradients(zip(grad, D.weights))
        del tape

        print(
            "Epoch: [{}/{}] [{}/{}] took: {:.3f}, d_loss: {:.5f}, g_loss: {:.5f}".
            format(step // n_step_epoch, flags.n_epoch, step, n_step_epoch,
                   time.time() - step_time, d_loss, g_loss))
        if np.mod(step, flags.save_step) == 0:
            G.save_weights('{}/G.npz'.format(flags.checkpoint_dir),
                           format='npz')
            D.save_weights('{}/D.npz'.format(flags.checkpoint_dir),
                           format='npz')
            G.eval()
            result = G(z)
            G.train()
            tl.visualize.save_images(
                result.numpy(), [num_tiles, num_tiles],
                '{}/train_{:02d}_{:04d}.png'.format(flags.sample_dir,
                                                    step // n_step_epoch,
                                                    step))
Example #21
import os

import face_recognition
import tensorflow as tf
import tensorlayer as tl

from model import get_generator, get_discriminator
from tqdm import tqdm


def preprocess_data(img_input, au_input):
    au = tf.expand_dims(au_input, axis=1, name='expand_dims1')  #[None, 1, 17]
    au = tf.expand_dims(au, axis=2, name='expand_dims2')  #[None, 1, 1, 17]
    au = tf.tile(au, multiples=[1, 128, 128, 1],
                 name='tile')  #[None, 128, 128, 17]
    x = tf.concat([img_input, au], axis=3,
                  name='concat')  #[None, 128, 128, 20]
    return x


if __name__ == '__main__':
    G = get_generator([None, 128, 128, 20])
    D = get_discriminator([None, 128, 128, 3])

    G_path = 'G_30.npz'
    tl.files.load_and_assign_npz(name=G_path, network=G)
    D_path = 'D_30.npz'
    tl.files.load_and_assign_npz(name=D_path, network=D)

    imgs_names = os.listdir('test_face')
    real_src = face_recognition.load_image_file('test.jpeg')  # RGB image
    face_loc = face_recognition.face_locations(real_src)

    # Guard the indexing: face_locations may return an empty list.
    if len(face_loc) >= 1:
        top, right, bottom, left = face_loc[0]
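The shape flow documented in the inline comments of preprocess_data can be sanity-checked directly (a sketch; batch size 2 is arbitrary):

import tensorflow as tf

img = tf.zeros([2, 128, 128, 3])  # batch of RGB images
au = tf.zeros([2, 17])            # batch of 17-dim AU vectors
x = preprocess_data(img, au)
print(x.shape)                    # (2, 128, 128, 20)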
Example #22
def maskBig(x, target, threshold):
    y = x * (target - 0.5)
    x[y > threshold] = 0.0
    return x

if __name__ == '__main__':
    #parser.add_argument('-save', type=str, default = './checkpoint/test/', help='place to save')
    _path = ''  # '/content/drive/My Drive/Colab Notebooks/myblast/'
    
    config = configparser.ConfigParser()
    config.read(_path+'mixed_15720.ini')
    #gpu_tracker.track()
    encoder = model.get_encoder(config, "M")
    discriminator = model.get_discriminator(config)
    generator = model.get_generator(config)
    if torch.cuda.is_available():
        encoder = encoder.cuda()
        discriminator = discriminator.cuda()
        generator = generator.cuda()
    #classifier = model.get_classifier(config).cuda()
    #gpu_tracker.track()
    #optimC = optim.Adam(classifier.parameters(), lr=config.getfloat('training', 'lr'))
    optimE = optim.Adam(encoder.parameters(), lr=config.getfloat('training', 'lr')*0.01) 
    optimG = optim.Adam(generator.parameters(), lr=config.getfloat('training', 'lr'))
    optimD = optim.Adam(discriminator.parameters(), lr=config.getfloat('training', 'lr'))

    '''
    Quake_Smart_seq2 = data.read_dataset(_path+"../data/Quake_Smart-seq2/data.h5")
    Quake_10x = data.read_dataset(_path+"../data/Quake_10x/data.h5")
    merge = {"A":Quake_Smart_seq2, "B":Quake_10x}
Example #23
def train():
    images, images_path = get_celebA(FLAGS.output_size, FLAGS.n_epoch,
                                     FLAGS.batch_size)
    G = get_generator([None, FLAGS.z_dim])
    D = get_discriminator(
        [None, FLAGS.output_size, FLAGS.output_size, FLAGS.c_dim])

    G.train()
    D.train()

    d_optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate,
                                         beta1=FLAGS.beta1)
    g_optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate,
                                         beta1=FLAGS.beta1)

    n_step_epoch = int(len(images_path) // FLAGS.batch_size)

    for step, batch_images in enumerate(images):
        step_time = time.time()

        with tf.GradientTape(persistent=True) as tape:
            z = tf.contrib.distributions.Normal(0., 1.).sample([
                FLAGS.batch_size, FLAGS.z_dim
            ])  #tf.placeholder(tf.float32, [None, z_dim], name='z_noise')
            d_logits = D(G(z))
            d2_logits = D(batch_images)
            d_loss_real = tl.cost.sigmoid_cross_entropy(
                d2_logits, tf.ones_like(d2_logits), name='real')
            d_loss_fake = tl.cost.sigmoid_cross_entropy(
                d_logits, tf.zeros_like(d_logits), name='fake')

        grad_gd = tape.gradient(d_loss_fake, G.weights + D.weights)
        grad_d1 = tape.gradient(d_loss_real, D.weights)
        scale = -1  #tf.reduce_mean(sigmoid(d_logits)/(sigmoid(d_logits)-1))
        grad_g = grad_gd[0:len(G.weights)]
        for i in range(len(grad_g)):
            if grad_g[i] is not None:  # batch_norm moving mean, var
                grad_g[i] = grad_g[i] * scale
            # grad_d1 = list(filter(lambda x: correct_grad(x, scale), grad_d1))
        grad_d2 = grad_gd[len(G.weights):]
        grad_d = []
        for x, y in zip(grad_d1, grad_d2):
            if x is None:  # batch_norm moving mean, var
                grad_d.append(None)
            else:
                grad_d.append(x + y)
        g_optimizer.apply_gradients(zip(grad_g, G.weights))
        d_optimizer.apply_gradients(zip(grad_d, D.weights))
        del tape

        g_loss = d_loss_fake
        d_loss = d_loss_real + d_loss_fake

        print(
            "Epoch: [{}/{}] [{}/{}] took: {:.3f}, d_loss: {:.5f}, g_loss: {:.5f}".
            format(step // n_step_epoch, FLAGS.n_epoch, step, n_step_epoch,
                   time.time() - step_time, d_loss, g_loss))
        if np.mod(step, FLAGS.save_step) == 0:
            G.save_weights('{}/G.npz'.format(FLAGS.checkpoint_dir),
                           format='npz')
            D.save_weights('{}/D.npz'.format(FLAGS.checkpoint_dir),
                           format='npz')
            result = G(z)
            tl.visualize.save_images(
                result.numpy(), [num_tiles, num_tiles],
                '{}/train_{:02d}_{:04d}.png'.format(FLAGS.sample_dir,
                                                    step // n_step_epoch,
                                                    step))
Example #24
def main(args):
    result_dir_path = Path(args.result_dir)
    result_dir_path.mkdir(parents=True, exist_ok=True)

    with Path(args.setting).open("r") as f:
        setting = json.load(f)
    pprint.pprint(setting)

    if args.g >= 0 and torch.cuda.is_available():
        device = torch.device(f"cuda:{args.g:d}")
        print(f"GPU mode: {args.g:d}")
    else:
        device = torch.device("cpu")
        print("CPU mode")

    mnist_neg = get_mnist_num(set(setting["label"]["neg"]))
    neg_loader = DataLoader(mnist_neg,
                            batch_size=setting["iterator"]["batch_size"])

    generator = get_generator().to(device)
    discriminator = get_discriminator().to(device)
    opt_g = torch.optim.Adam(
        generator.parameters(),
        lr=setting["optimizer"]["alpha"],
        betas=(setting["optimizer"]["beta1"], setting["optimizer"]["beta2"]),
        weight_decay=setting["regularization"]["weight_decay"])
    opt_d = torch.optim.Adam(
        discriminator.parameters(),
        lr=setting["optimizer"]["alpha"],
        betas=(setting["optimizer"]["beta1"], setting["optimizer"]["beta2"]),
        weight_decay=setting["regularization"]["weight_decay"])

    trainer = Engine(
        GANTrainer(generator,
                   discriminator,
                   opt_g,
                   opt_d,
                   device=device,
                   **setting["updater"]))

    # For evaluation
    test_neg = get_mnist_num(set(setting["label"]["neg"]), train=False)
    test_neg_loader = DataLoader(test_neg, setting["iterator"]["batch_size"])
    test_pos = get_mnist_num(set(setting["label"]["pos"]), train=False)
    test_pos_loader = DataLoader(test_pos, setting["iterator"]["batch_size"])
    detector = Detector(generator, discriminator,
                        setting["updater"]["noise_std"], device).to(device)

    log_dict = {}
    evaluator = evaluate_accuracy(log_dict, detector, test_neg_loader,
                                  test_pos_loader, device)
    plotter = plot_metrics(log_dict, ["accuracy", "precision", "recall", "f"],
                           "iteration", result_dir_path / "metrics.pdf")
    printer = print_logs(log_dict,
                         ["iteration", "accuracy", "precision", "recall", "f"])
    img_saver = save_img(generator, test_pos, test_neg,
                         result_dir_path / "images",
                         setting["updater"]["noise_std"], device)

    trainer.add_event_handler(Events.ITERATION_COMPLETED(every=1000),
                              evaluator)
    trainer.add_event_handler(Events.ITERATION_COMPLETED(every=1000), plotter)
    trainer.add_event_handler(Events.ITERATION_COMPLETED(every=1000), printer)
    trainer.add_event_handler(Events.ITERATION_COMPLETED(every=1000),
                              img_saver)

    # Terminate at the specified iteration
    trainer.add_event_handler(
        Events.ITERATION_COMPLETED(once=setting["iteration"]),
        lambda engine: engine.terminate())
    trainer.run(neg_loader, max_epochs=10**10)
Example #25
def train():
    # Horovod: initialize Horovod.
    hvd.init()
    # Horovod: pin GPU to be used to process local rank (one GPU per process)
    config = tf.ConfigProto()
    config.gpu_options.visible_device_list = str(hvd.local_rank())
    tf.enable_eager_execution(config=config)
    # Horovod: adjust number of steps based on number of GPUs.
    images, images_path = get_celebA(FLAGS.output_size, FLAGS.n_epoch // hvd.size(), FLAGS.batch_size)

    G = get_generator([None, FLAGS.z_dim])
    D = get_discriminator([None, FLAGS.output_size, FLAGS.output_size, FLAGS.c_dim])

    G.train()
    D.train()

    d_optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate * hvd.size(), beta1=FLAGS.beta1) # linear scaling rule
    g_optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate * hvd.size(), beta1=FLAGS.beta1)

    step_counter = tf.train.get_or_create_global_step()

    n_step_epoch = int(len(images_path) // FLAGS.batch_size)

    for step, batch_images in enumerate(images):
        step_time = time.time()
        with tf.GradientTape(persistent=True) as tape:
            z = tf.contrib.distributions.Normal(0., 1.).sample([FLAGS.batch_size, FLAGS.z_dim]) #tf.placeholder(tf.float32, [None, z_dim], name='z_noise')
            d_logits = D(G(z))
            d2_logits = D(batch_images)
            # discriminator: real images are labelled as 1
            d_loss_real = tl.cost.sigmoid_cross_entropy(d2_logits, tf.ones_like(d2_logits), name='dreal')
            # discriminator: images from generator (fake) are labelled as 0
            d_loss_fake = tl.cost.sigmoid_cross_entropy(d_logits, tf.zeros_like(d_logits), name='dfake')
            # cost for updating discriminator
            d_loss = d_loss_real + d_loss_fake
            # generator: try to make the fake images look real (1)
            g_loss = tl.cost.sigmoid_cross_entropy(d_logits, tf.ones_like(d_logits), name='gfake')

        # Horovod: broadcast initial variable states from rank 0 to all other processes.
        # This is necessary to ensure consistent initialization of all workers when
        # training is started with random weights or restored from a checkpoint.
        if step == 0:
            hvd.broadcast_variables(G.weights, root_rank=0)
            hvd.broadcast_variables(D.weights, root_rank=0)

        # Horovod: add Horovod Distributed GradientTape.
        tape = hvd.DistributedGradientTape(tape)
        grad = tape.gradient(d_loss, D.weights)
        d_optimizer.apply_gradients(zip(grad, D.weights), global_step=tf.train.get_or_create_global_step())
        grad = tape.gradient(g_loss, G.weights)
        g_optimizer.apply_gradients(zip(grad, G.weights), global_step=tf.train.get_or_create_global_step())

        # Horovod: print logging only on worker 0
        if hvd.rank() == 0:
            print("Epoch: [{}/{}] [{}/{}] took: {:.3f}, d_loss: {:.5f}, g_loss: {:.5f}".format(
                step // n_step_epoch, FLAGS.n_epoch, step, n_step_epoch,
                time.time() - step_time, d_loss, g_loss))

        # Horovod: save checkpoints only on worker 0
        if hvd.rank() == 0 and np.mod(step, FLAGS.save_step) == 0:
            G.save_weights('{}/G.npz'.format(FLAGS.checkpoint_dir), format='npz')
            D.save_weights('{}/D.npz'.format(FLAGS.checkpoint_dir), format='npz')
            result = G(z)
            tl.visualize.save_images(result.numpy(), [num_tiles, num_tiles], '{}/train_{:02d}_{:04d}.png'.format(FLAGS.sample_dir, step//n_step_epoch, step))
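Horovod scripts like Example #25 are launched with one process per GPU; a typical invocation (hypothetical script name) is:

# Four processes on the local machine, one per visible GPU:
#   horovodrun -np 4 python dcgan_horovod.py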
Example #26
# setting
mx.random.seed(random.randint(1, 10000))
logging.basicConfig(level=logging.DEBUG)

# create output dir
try:
    os.makedirs(opt.data_path)
except OSError:
    pass

# get training data
train_data = get_training_data(opt.batch_size)

# get model
g_net = get_generator()
d_net = get_descriptor(CTX)

# define loss function
loss = gluon.loss.SigmoidBinaryCrossEntropyLoss()

# initialization
g_net.collect_params().initialize(mx.init.Xavier(), ctx=CTX)
d_net.collect_params().initialize(mx.init.Xavier(), ctx=CTX)
g_trainer = gluon.Trainer(
    g_net.collect_params(), 'Adam', {'learning_rate': LEARNING_RATE, 'beta1': BETA, 'clip_gradient': CLIP_GRADIENT})
d_trainer = gluon.Trainer(
    d_net.collect_params(), 'Adam', {'learning_rate': LEARNING_RATE, 'beta1': BETA, 'clip_gradient': CLIP_GRADIENT})
g_net.collect_params().zero_grad()
d_net.collect_params().zero_grad()
# define evaluation metric
Example #27
def train():
    images, images_path = get_celebA(flags.output_size, flags.n_epoch,
                                     flags.batch_size)
    G = get_generator([None, flags.z_dim])
    D = get_discriminator(
        [None, flags.z_dim],
        [None, flags.output_size, flags.output_size, flags.c_dim])
    E = get_encoder([None, flags.output_size, flags.output_size, flags.c_dim])

    if flags.load_weights:
        E.load_weights('checkpoint/E.npz', format='npz')
        G.load_weights('checkpoint/G.npz', format='npz')
        D.load_weights('checkpoint/D.npz', format='npz')

    G.train()
    D.train()
    E.train()

    d_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)
    g_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)
    e_optimizer = tf.optimizers.Adam(flags.lr, beta_1=flags.beta1)

    n_step_epoch = int(len(images_path) // flags.batch_size)

    for epoch in range(flags.n_epoch):
        for step, batch_images in enumerate(images):
            if batch_images.shape[0] != flags.batch_size:
                break
            step_time = time.time()

            with tf.GradientTape(persistent=True) as tape:
                z = np.random.normal(loc=0.0,
                                     scale=1.0,
                                     size=[flags.batch_size,
                                           flags.z_dim]).astype(np.float32)

                d_logits = D([G(z), z])
                d2_logits = D([batch_images, E(batch_images)])

                d_loss_real = tl.cost.sigmoid_cross_entropy(
                    d2_logits, tf.ones_like(d2_logits), name='dreal')
                d_loss_fake = tl.cost.sigmoid_cross_entropy(
                    d_logits, tf.zeros_like(d_logits), name='dfake')
                d_loss = d_loss_fake + d_loss_real

                g_loss = tl.cost.sigmoid_cross_entropy(d_logits,
                                                       tf.ones_like(d_logits),
                                                       name='gfake')

                e_loss = tl.cost.sigmoid_cross_entropy(
                    d2_logits, tf.zeros_like(d2_logits), name='ereal')

            grad = tape.gradient(g_loss, G.trainable_weights)
            g_optimizer.apply_gradients(zip(grad, G.trainable_weights))
            grad = tape.gradient(d_loss, D.trainable_weights)
            d_optimizer.apply_gradients(zip(grad, D.trainable_weights))
            grad = tape.gradient(e_loss, E.trainable_weights)
            e_optimizer.apply_gradients(zip(grad, E.trainable_weights))

            del tape

            print(
                "Epoch: [{}/{}] [{}/{}] took: {:.3f}, d_loss: {:.5f}, g_loss: {:.5f}, e_loss: {:.5f}"
                .format(epoch, flags.n_epoch, step, n_step_epoch,
                        time.time() - step_time, d_loss, g_loss, e_loss))

        if np.mod(epoch, flags.save_every_epoch) == 0:
            G.save_weights('{}/G.npz'.format(flags.checkpoint_dir),
                           format='npz')
            D.save_weights('{}/D.npz'.format(flags.checkpoint_dir),
                           format='npz')
            E.save_weights('{}/E.npz'.format(flags.checkpoint_dir),
                           format='npz')
            G.eval()
            result = G(z)
            G.train()
            tl.visualize.save_images(
                result.numpy(), [num_tiles, num_tiles],
                '{}/train_{:02d}.png'.format(flags.sample_dir, epoch))

            for step, batch_images in enumerate(images):
                if batch_images.shape[0] != flags.batch_size:
                    break
                result = G(E(batch_images))
                tl.visualize.save_images(
                    batch_images.numpy(), [num_tiles, num_tiles],
                    '{}/real_{:02d}.png'.format(flags.pair_dir, epoch))
                tl.visualize.save_images(
                    result.numpy(), [num_tiles, num_tiles],
                    '{}/reproduced_{:02d}.png'.format(flags.pair_dir, epoch))
                break
Example #28
import os, time, multiprocessing
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from glob import glob
from data import get_celebA, flags
from model import get_generator, get_discriminator
import helper
import matplotlib.pyplot as plt

plt.switch_backend('agg')

if __name__ == '__main__':
    G = get_generator([None, flags.z_dim])
    G.load_weights('checkpoint/G12.npz')
    plt.figure(figsize=(6, 6), dpi=100)
    for i in range(500):
        imagePath = '{}/{:03d}_image.png'.format(flags.sample_dir, i + 1)
        # Skip samples that already exist before spending time generating them.
        if os.path.exists(imagePath):
            continue
        z = np.random.normal(loc=0.0, scale=1.0,
                             size=[9, flags.z_dim]).astype(np.float32)
        G.eval()
        result = G(z)
        G.train()
        plt.imshow(helper.images_square_grid(result.numpy()))
        plt.axis("off")
        plt.savefig(imagePath, bbox_inches='tight', pad_inches=0)
Example #29
# Imports assumed by this snippet (get_generator/get_encoder presumably come
# from the project's model module, as in the other examples).
import numpy as np
import matplotlib.pyplot as plt
import tensorlayer as tl

from model import get_generator, get_encoder

tl.logging.set_verbosity(tl.logging.FATAL)

class FLAGS(object):
    def __init__(self):
        self.z_dim = 100 
        self.output_size = 64 
        self.c_dim = 3 
        self.checkpoint_dir = "checkpoint"
        
flags = FLAGS() 
g_weights = 'checkpoint/G_20.h5'
e_weights = 'checkpoint/E_8.h5'

# load models
G = get_generator()
G.load_weights(g_weights)
G.eval()
E = get_encoder()
E.load_weights(e_weights)
E.eval()

# randomly generate faces
def gen(n=16):
    z = np.random.normal(loc=0.0, scale=1.0, size=[n, flags.z_dim]).astype(np.float32)
    gen = G(z)
    gen = np.array((gen + 1.) * 127.5, dtype=np.uint8)
    for i in range(n):
        plt.imshow(gen[i])
        plt.show()
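The (gen + 1.) * 127.5 step in gen() assumes a tanh-activated generator, mapping outputs in [-1, 1] to pixel values in [0, 255]:

import numpy as np

x = np.array([-1.0, 0.0, 1.0])
print((x + 1.0) * 127.5)  # [  0.  127.5 255. ], then cast to uint8 for display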
Example #30
def train():
    images, images_path = get_celebA(FLAGS.output_size, FLAGS.n_epoch,
                                     FLAGS.batch_size)
    G = get_generator([None, FLAGS.z_dim])
    D = get_discriminator(
        [None, FLAGS.output_size, FLAGS.output_size, FLAGS.c_dim])

    G.train()
    D.train()

    d_optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate,
                                         beta1=FLAGS.beta1)
    g_optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate,
                                         beta1=FLAGS.beta1)

    n_step_epoch = int(len(images_path) // FLAGS.batch_size)
    step_time = time.time()
    for step, batch_images in enumerate(images):
        #step_time = time.time()
        with tf.GradientTape(persistent=True) as tape:
            z = tf.contrib.distributions.Normal(0., 1.).sample([
                FLAGS.batch_size, FLAGS.z_dim
            ])  #tf.placeholder(tf.float32, [None, z_dim], name='z_noise')
            d_logits = D(G(z))
            d2_logits = D(batch_images)
            # discriminator: real images are labelled as 1
            d_loss_real = tl.cost.sigmoid_cross_entropy(
                d2_logits, tf.ones_like(d2_logits), name='dreal')
            # discriminator: images from generator (fake) are labelled as 0
            d_loss_fake = tl.cost.sigmoid_cross_entropy(
                d_logits, tf.zeros_like(d_logits), name='dfake')
            # combined loss for updating discriminator
            d_loss = d_loss_real + d_loss_fake
            # generator: try to fool discriminator to output 1
            g_loss = tl.cost.sigmoid_cross_entropy(d_logits,
                                                   tf.ones_like(d_logits),
                                                   name='gfake')

        grad = tape.gradient(g_loss, G.weights)
        g_optimizer.apply_gradients(zip(grad, G.weights))
        grad = tape.gradient(d_loss, D.weights)
        d_optimizer.apply_gradients(zip(grad, D.weights))
        del tape

        #print("Epoch: [{}/{}] [{}/{}] took: {:3f}, d_loss: {:5f}, g_loss: {:5f}".format(step//n_step_epoch, FLAGS.n_epoch, step, n_step_epoch, time.time()-step_time, d_loss, g_loss))
        if np.mod(step, n_step_epoch) == 0:
            fid = tf.contrib.gan.eval.frechet_classifier_distance(
                batch_images, G(z), D, num_batches=8)
            print(
                "Epoch: [{}/{}] [{}/{}] took: {:.3f}, d_loss: {:.5f}, g_loss: {:.5f}, fid: {:.5f}"
                .format(step // n_step_epoch, FLAGS.n_epoch, step,
                        n_step_epoch,
                        time.time() - step_time, d_loss, g_loss, fid))
            step_time = time.time()

        if np.mod(step, FLAGS.save_step) == 0:
            G.save_weights('{}/G.npz'.format(FLAGS.checkpoint_dir),
                           format='npz')
            D.save_weights('{}/D.npz'.format(FLAGS.checkpoint_dir),
                           format='npz')
            result = G(z)
            tl.visualize.save_images(
                result.numpy(), [num_tiles, num_tiles],
                '{}/train_{:02d}_{:04d}.png'.format(FLAGS.sample_dir,
                                                    step // n_step_epoch,
                                                    step))

    fid = tf.contrib.gan.eval.frechet_classifier_distance(batch_images,
                                                          G(z),
                                                          D,
                                                          num_batches=8)
    print(
        "Epoch: [{}/{}] [{}/{}] took: {:.3f}, d_loss: {:.5f}, g_loss: {:.5f}, fid: {:.5f}"
        .format(step // n_step_epoch, FLAGS.n_epoch, step, n_step_epoch,
                time.time() - step_time, d_loss, g_loss, fid))
    result = G(z).numpy()
    tl.visualize.save_images(
        result, [num_tiles, num_tiles],
        '{}/train_{:02d}_{:04d}.png'.format(FLAGS.sample_dir,
                                            step // n_step_epoch, step))
    for i in range(result.shape[0]):
        tl.visualize.save_image(
            result[i, :, :, :],
            '{}/train_{:02d}.png'.format(FLAGS.sample_dir, i))