def run_method_1():
    # config
    discriminator_config = gan.config_discriminator
    generator_config = gan.config_generator

    num_col = 10
    num_generation = 20
    batchsize = 2 * num_generation
    base_z = gan.to_variable(gan.sample_z(batchsize))

    # optimize z
    class_true = gan.to_variable(np.zeros(batchsize, dtype=np.int32))
    for n in xrange(5):
        x_fake = gan.generate_x_from_z(base_z, test=True, as_numpy=False)
        discrimination_fake, _ = gan.discriminate(x_fake, apply_softmax=False, test=True)
        cross_entropy = F.softmax_cross_entropy(discrimination_fake, class_true)
        gan.backprop_generator(cross_entropy)
        base_z = gan.to_variable(base_z.data + base_z.grad * 0.01)

    base_z = gan.to_numpy(base_z)
    sum_z = np.sum(base_z)
    if sum_z != sum_z:  # NaN never compares equal to itself
        raise Exception("NaN")

    # blend each pair of optimized z vectors over num_col interpolation steps
    mix_z = np.zeros((num_col * num_generation, generator_config.ndim_input), dtype=np.float32)
    for g in xrange(num_generation):
        for i in xrange(num_col):
            mix_z[g * num_col + i] = base_z[2 * g] * (i / float(num_col)) + base_z[2 * g + 1] * (1 - i / float(num_col))

    x_negative = gan.generate_x_from_z(mix_z, test=True, as_numpy=True)
    x_negative = (x_negative + 1.0) / 2.0
    visualizer.tile_rgb_images(x_negative.transpose(0, 2, 3, 1), dir=args.plot_dir, filename="analogy_1", row=num_generation, col=num_col)
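# --- Sketch: the latent mixing step above, isolated for clarity ---
# A minimal NumPy-only restatement of how mix_z is built (rows 2g and 2g+1 of base_z are
# linearly interpolated over num_col steps). Names and shapes here are illustrative, not
# taken from the repo.
import numpy as np

def interpolate_z_pairs(base_z, num_col):
    # base_z: (2 * num_generation, ndim) array; each consecutive pair is blended over num_col steps
    num_generation = base_z.shape[0] // 2
    mix_z = np.zeros((num_generation * num_col, base_z.shape[1]), dtype=np.float32)
    for g in range(num_generation):
        for i in range(num_col):
            w = i / float(num_col)  # weight of the first z of the pair
            mix_z[g * num_col + i] = w * base_z[2 * g] + (1.0 - w) * base_z[2 * g + 1]
    return mix_z

# usage (hypothetical shapes): interpolate_z_pairs(np.random.randn(40, 100).astype(np.float32), num_col=10)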
def main():
    # load MNIST images
    images, labels = dataset.load_train_images()

    # config
    discriminator_config = gan.config_discriminator
    generator_config = gan.config_generator

    # settings
    max_epoch = 1000
    num_updates_per_epoch = 500
    plot_interval = 5
    batchsize_true = 100
    batchsize_fake = batchsize_true

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # training
    progress = Progress()
    for epoch in xrange(1, max_epoch + 1):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_critic = 0
        sum_loss_generator = 0

        for t in xrange(num_updates_per_epoch):
            for k in xrange(discriminator_config.num_critic):
                # clamp parameters to a cube
                gan.clip_discriminator_weights()
                # gan.decay_discriminator_weights()

                # sample true data from data distribution
                images_true = dataset.sample_data(images, batchsize_true, binarize=False)
                # sample fake data from generator
                images_fake = gan.generate_x(batchsize_fake)
                images_fake.unchain_backward()

                fw_true, activations_true = gan.discriminate(images_true)
                fw_fake, _ = gan.discriminate(images_fake)

                loss_critic = -F.sum(fw_true - fw_fake) / batchsize_true
                sum_loss_critic += float(loss_critic.data) / discriminator_config.num_critic

                # update discriminator
                gan.backprop_discriminator(loss_critic)

            # generator loss
            images_fake = gan.generate_x(batchsize_fake)
            fw_fake, activations_fake = gan.discriminate(images_fake)
            loss_generator = -F.sum(fw_fake) / batchsize_fake

            # feature matching
            if discriminator_config.use_feature_matching:
                features_true = activations_true[-1]
                features_true.unchain_backward()
                if batchsize_true != batchsize_fake:
                    images_fake = gan.generate_x(batchsize_true)
                    _, activations_fake = gan.discriminate(images_fake, apply_softmax=False)
                features_fake = activations_fake[-1]
                loss_generator += F.mean_squared_error(features_true, features_fake)

            # update generator
            gan.backprop_generator(loss_generator)
            sum_loss_generator += float(loss_generator.data)

            if t % 10 == 0:
                progress.show(t, num_updates_per_epoch, {})

        gan.save(args.model_dir)

        progress.show(num_updates_per_epoch, num_updates_per_epoch, {
            "wasserstein": -sum_loss_critic / num_updates_per_epoch,
            "loss_g": sum_loss_generator / num_updates_per_epoch,
        })

        if epoch % plot_interval == 0 or epoch == 1:
            plot(filename="epoch_{}_time_{}min".format(epoch, progress.get_total_time()))
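# --- Sketch: WGAN weight clipping ---
# gan.clip_discriminator_weights() is provided by the repo; a minimal version of such a clamp
# for a chainer.Chain could look like the helper below (an assumption, not the repo's code).
# The 0.01 bound follows the WGAN paper's "clamp parameters to a cube" recipe.
from chainer import cuda

def clip_weights(discriminator, bound=0.01):
    for param in discriminator.params():
        xp = cuda.get_array_module(param.data)
        param.data = xp.clip(param.data, -bound, bound)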
def main():
    # load MNIST images
    images, labels = dataset.load_train_images()

    # config
    discriminator_config = gan.config_discriminator
    generator_config = gan.config_generator

    # settings
    # _l -> labeled
    # _u -> unlabeled
    # _g -> generated
    max_epoch = 1000
    num_trains_per_epoch = 500
    plot_interval = 5
    batchsize_l = 100
    batchsize_u = 100
    batchsize_g = batchsize_u

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # save validation accuracy per epoch
    csv_results = []

    # create semi-supervised split
    num_validation_data = 10000
    num_labeled_data = args.num_labeled
    if batchsize_l > num_labeled_data:
        batchsize_l = num_labeled_data

    training_images_l, training_labels_l, training_images_u, validation_images, validation_labels = dataset.create_semisupervised(
        images, labels, num_validation_data, num_labeled_data,
        discriminator_config.ndim_output, seed=args.seed)
    print training_labels_l

    # training
    progress = Progress()
    for epoch in xrange(1, max_epoch):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_supervised = 0
        sum_loss_unsupervised = 0
        sum_loss_adversarial = 0
        sum_dx_labeled = 0
        sum_dx_unlabeled = 0
        sum_dx_generated = 0

        gan.update_learning_rate(get_learning_rate_for_epoch(epoch))

        for t in xrange(num_trains_per_epoch):
            # sample from data distribution
            images_l, label_onehot_l, label_ids_l = dataset.sample_labeled_data(
                training_images_l, training_labels_l, batchsize_l,
                discriminator_config.ndim_input,
                discriminator_config.ndim_output, binarize=False)
            images_u = dataset.sample_unlabeled_data(
                training_images_u, batchsize_u,
                discriminator_config.ndim_input, binarize=False)
            images_g = gan.generate_x(batchsize_g)
            images_g.unchain_backward()

            # supervised loss
            py_x_l, activations_l = gan.discriminate(images_l, apply_softmax=False)
            loss_supervised = F.softmax_cross_entropy(py_x_l, gan.to_variable(label_ids_l))
            log_zx_l = F.logsumexp(py_x_l, axis=1)
            log_dx_l = log_zx_l - F.softplus(log_zx_l)
            dx_l = F.sum(F.exp(log_dx_l)) / batchsize_l

            # unsupervised loss
            # D(x) = Z(x) / {Z(x) + 1}, where Z(x) = \sum_{k=1}^K exp(l_k(x))
            # softplus(x) := log(1 + exp(x))
            # logD(x) = logZ(x) - log(Z(x) + 1)
            #         = logZ(x) - log(exp(log(Z(x))) + 1)
            #         = logZ(x) - softplus(logZ(x))
            # 1 - D(x) = 1 / {Z(x) + 1}
            # log{1 - D(x)} = log1 - log(Z(x) + 1)
            #               = -log(exp(log(Z(x))) + 1)
            #               = -softplus(logZ(x))
            py_x_u, _ = gan.discriminate(images_u, apply_softmax=False)
            log_zx_u = F.logsumexp(py_x_u, axis=1)
            log_dx_u = log_zx_u - F.softplus(log_zx_u)
            dx_u = F.sum(F.exp(log_dx_u)) / batchsize_u
            loss_unsupervised = -F.sum(log_dx_u) / batchsize_u  # minimize negative logD(x)
            py_x_g, _ = gan.discriminate(images_g, apply_softmax=False)
            log_zx_g = F.logsumexp(py_x_g, axis=1)
            loss_unsupervised += F.sum(F.softplus(log_zx_g)) / batchsize_u  # minimize negative log{1 - D(x)}

            # update discriminator
            gan.backprop_discriminator(loss_supervised + loss_unsupervised)

            # adversarial loss
            images_g = gan.generate_x(batchsize_g)
            py_x_g, activations_g = gan.discriminate(images_g, apply_softmax=False)
            log_zx_g = F.logsumexp(py_x_g, axis=1)
            log_dx_g = log_zx_g - F.softplus(log_zx_g)
            dx_g = F.sum(F.exp(log_dx_g)) / batchsize_g
            loss_adversarial = -F.sum(log_dx_g) / batchsize_u  # minimize negative logD(x)

            # feature matching
            if discriminator_config.use_feature_matching:
                features_true = activations_l[-1]
                features_true.unchain_backward()
                if batchsize_l != batchsize_g:
                    images_g = gan.generate_x(batchsize_l)
                    _, activations_g = gan.discriminate(images_g, apply_softmax=False)
                features_fake = activations_g[-1]
                loss_adversarial += F.mean_squared_error(features_true, features_fake)

            # update generator
            gan.backprop_generator(loss_adversarial)

            sum_loss_supervised += float(loss_supervised.data)
            sum_loss_unsupervised += float(loss_unsupervised.data)
            sum_loss_adversarial += float(loss_adversarial.data)
            sum_dx_labeled += float(dx_l.data)
            sum_dx_unlabeled += float(dx_u.data)
            sum_dx_generated += float(dx_g.data)

            if t % 10 == 0:
                progress.show(t, num_trains_per_epoch, {})

        gan.save(args.model_dir)

        # validation
        images_l, _, label_ids_l = dataset.sample_labeled_data(
            validation_images, validation_labels, num_validation_data,
            discriminator_config.ndim_input,
            discriminator_config.ndim_output, binarize=False)
        images_l_segments = np.split(images_l, num_validation_data // 500)
        label_ids_l_segments = np.split(label_ids_l, num_validation_data // 500)
        sum_accuracy = 0
        for images_l, label_ids_l in zip(images_l_segments, label_ids_l_segments):
            y_distribution, _ = gan.discriminate(images_l, apply_softmax=True, test=True)
            accuracy = F.accuracy(y_distribution, gan.to_variable(label_ids_l))
            sum_accuracy += float(accuracy.data)
        validation_accuracy = sum_accuracy / len(images_l_segments)

        progress.show(num_trains_per_epoch, num_trains_per_epoch, {
            "loss_l": sum_loss_supervised / num_trains_per_epoch,
            "loss_u": sum_loss_unsupervised / num_trains_per_epoch,
            "loss_g": sum_loss_adversarial / num_trains_per_epoch,
            "dx_l": sum_dx_labeled / num_trains_per_epoch,
            "dx_u": sum_dx_unlabeled / num_trains_per_epoch,
            "dx_g": sum_dx_generated / num_trains_per_epoch,
            "accuracy": validation_accuracy,
        })

        # write accuracy to csv
        csv_results.append([epoch, validation_accuracy, progress.get_total_time()])
        data = pd.DataFrame(csv_results)
        data.columns = ["epoch", "accuracy", "min"]
        data.to_csv("{}/result.csv".format(args.model_dir))

        if epoch % plot_interval == 0 or epoch == 1:
            plot(filename="epoch_{}_time_{}min".format(epoch, progress.get_total_time()))
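# --- Sketch: numeric check of the logD(x) identities used above ---
# The comments in the unsupervised loss derive logD(x) = logZ(x) - softplus(logZ(x)) and
# log(1 - D(x)) = -softplus(logZ(x)). The check below verifies both with NumPy stand-ins
# for F.logsumexp / F.softplus; the logits are made up for illustration.
import numpy as np

def softplus(x):
    return np.log1p(np.exp(x))

logits = np.array([0.3, -1.2, 2.0, 0.5])      # l_k(x) for K = 4 classes
log_zx = np.log(np.sum(np.exp(logits)))       # logZ(x) = logsumexp(l(x))
zx = np.exp(log_zx)
assert np.allclose(np.exp(log_zx - softplus(log_zx)), zx / (zx + 1.0))   # D(x) = Z(x) / (Z(x) + 1)
assert np.allclose(-softplus(log_zx), np.log(1.0 / (zx + 1.0)))          # log(1 - D(x)) = -softplus(logZ(x))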
def main():
    images = load_rgb_images(args.image_dir)

    # config
    discriminator_config = gan.config_discriminator
    generator_config = gan.config_generator

    # settings
    max_epoch = 1000
    num_updates_per_epoch = 500
    batchsize_true = 128
    batchsize_fake = 128
    plot_interval = 5

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # init weightnorm layers
    if discriminator_config.use_weightnorm:
        print "initializing weight normalization layers of the discriminator ..."
        x_true = sample_from_data(images, batchsize_true)
        gan.discriminate(x_true)
    if generator_config.use_weightnorm:
        print "initializing weight normalization layers of the generator ..."
        gan.generate_x(batchsize_fake)

    # training
    progress = Progress()
    for epoch in xrange(1, max_epoch + 1):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_unsupervised = 0
        sum_loss_adversarial = 0
        sum_dx_unlabeled = 0
        sum_dx_generated = 0

        for t in xrange(num_updates_per_epoch):
            # sample data
            x_true = sample_from_data(images, batchsize_true)
            x_fake = gan.generate_x(batchsize_fake)
            x_fake.unchain_backward()

            # unsupervised loss
            # D(x) = Z(x) / {Z(x) + 1}, where Z(x) = \sum_{k=1}^K exp(l_k(x))
            # softplus(x) := log(1 + exp(x))
            # logD(x) = logZ(x) - log(Z(x) + 1)
            #         = logZ(x) - log(exp(log(Z(x))) + 1)
            #         = logZ(x) - softplus(logZ(x))
            # 1 - D(x) = 1 / {Z(x) + 1}
            # log{1 - D(x)} = log1 - log(Z(x) + 1)
            #               = -log(exp(log(Z(x))) + 1)
            #               = -softplus(logZ(x))
            log_zx_u, activations_u = gan.discriminate(x_true, apply_softmax=False)
            log_dx_u = log_zx_u - F.softplus(log_zx_u)
            dx_u = F.sum(F.exp(log_dx_u)) / batchsize_true
            loss_unsupervised = -F.sum(log_dx_u) / batchsize_true  # minimize negative logD(x)
            py_x_g, _ = gan.discriminate(x_fake, apply_softmax=False)
            log_zx_g = F.logsumexp(py_x_g, axis=1)
            loss_unsupervised += F.sum(F.softplus(log_zx_g)) / batchsize_true  # minimize negative log{1 - D(x)}

            # update discriminator
            gan.backprop_discriminator(loss_unsupervised)
            sum_loss_unsupervised += float(loss_unsupervised.data)
            sum_dx_unlabeled += float(dx_u.data)

            # generator loss
            x_fake = gan.generate_x(batchsize_fake)
            log_zx_g, activations_g = gan.discriminate(x_fake, apply_softmax=False)
            log_dx_g = log_zx_g - F.softplus(log_zx_g)
            dx_g = F.sum(F.exp(log_dx_g)) / batchsize_fake
            loss_generator = -F.sum(log_dx_g) / batchsize_true  # minimize negative logD(x)

            # feature matching
            if discriminator_config.use_feature_matching:
                features_true = activations_u[-1]
                features_true.unchain_backward()
                if batchsize_true != batchsize_fake:
                    x_fake = gan.generate_x(batchsize_true)
                    _, activations_g = gan.discriminate(x_fake, apply_softmax=False)
                features_fake = activations_g[-1]
                loss_generator += F.mean_squared_error(features_true, features_fake)

            # update generator
            gan.backprop_generator(loss_generator)
            sum_loss_adversarial += float(loss_generator.data)
            sum_dx_generated += float(dx_g.data)

            if t % 10 == 0:
                progress.show(t, num_updates_per_epoch, {})

        gan.save(args.model_dir)

        progress.show(num_updates_per_epoch, num_updates_per_epoch, {
            "loss_u": sum_loss_unsupervised / num_updates_per_epoch,
            "loss_g": sum_loss_adversarial / num_updates_per_epoch,
            "dx_u": sum_dx_unlabeled / num_updates_per_epoch,
            "dx_g": sum_dx_generated / num_updates_per_epoch,
        })

        if epoch % plot_interval == 0 or epoch == 1:
            plot(filename="epoch_{}_time_{}min".format(epoch, progress.get_total_time()))
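# --- Sketch: feature matching ---
# The loop above adds F.mean_squared_error(features_true, features_fake) over per-sample
# activations. In Salimans et al.'s formulation the generator instead matches the *batch means*
# of an intermediate discriminator layer; a sketch of that variant (helper name is illustrative):

def feature_matching_loss(features_true, features_fake):
    # features_*: chainer Variables of shape (batchsize, ndim_feature)
    mean_true = F.sum(features_true, axis=0) / float(features_true.data.shape[0])
    mean_fake = F.sum(features_fake, axis=0) / float(features_fake.data.shape[0])
    return F.mean_squared_error(mean_true, mean_fake)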
def main():
    # config
    discriminator_config = gan.config_discriminator
    generator_config = gan.config_generator

    # labels
    a = discriminator_config.a
    b = discriminator_config.b
    c = discriminator_config.c

    # settings
    max_epoch = 200
    num_updates_per_epoch = 500
    plot_interval = 5
    batchsize_true = 100
    batchsize_fake = batchsize_true
    scale = 2.0

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # training
    progress = Progress()
    plot_samples(0, progress)
    for epoch in xrange(1, max_epoch + 1):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_d = 0
        sum_loss_g = 0

        for t in xrange(num_updates_per_epoch):
            # sample from data distribution
            samples_true = sampler.gaussian_mixture_circle(
                batchsize_true, generator_config.num_mixture, scale=scale, std=0.2)
            # sample from generator
            samples_fake = gan.generate_x(batchsize_true, from_gaussian=True)
            samples_fake.unchain_backward()

            d_true = gan.discriminate(samples_true / scale, return_activations=False)
            d_fake = gan.discriminate(samples_fake / scale, return_activations=False)
            loss_d = 0.5 * (F.sum((d_true - b) ** 2) + F.sum((d_fake - a) ** 2)) / batchsize_true
            sum_loss_d += float(loss_d.data)

            # update discriminator
            gan.backprop_discriminator(loss_d)

            # generator loss
            samples_fake = gan.generate_x(batchsize_fake, from_gaussian=True)
            d_fake = gan.discriminate(samples_fake / scale, return_activations=False)
            loss_g = 0.5 * (F.sum((d_fake - c) ** 2)) / batchsize_fake
            sum_loss_g += float(loss_g.data)

            # update generator
            gan.backprop_generator(loss_g)

            if t % 10 == 0:
                progress.show(t, num_updates_per_epoch, {})

        gan.save(args.model_dir)

        progress.show(num_updates_per_epoch, num_updates_per_epoch, {
            "loss_d": sum_loss_d / num_updates_per_epoch,
            "loss_g": sum_loss_g / num_updates_per_epoch,
        })

        if epoch % plot_interval == 0 or epoch == 1:
            plot_samples(epoch, progress)
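# --- Sketch: the least-squares GAN objective implemented above ---
# With labels a (fake), b (real) and c (the value the generator wants the critic to output),
# the losses reduce to (Mao et al., LSGAN):
#   L_D = 0.5 * E[(D(x) - b)^2] + 0.5 * E[(D(G(z)) - a)^2]
#   L_G = 0.5 * E[(D(G(z)) - c)^2]
# The helpers below mirror the expressions in the loop; the default label values are a common
# choice (a=0, b=1, c=1), not necessarily the ones stored in discriminator_config.

def lsgan_discriminator_loss(d_true, d_fake, a=0.0, b=1.0):
    batchsize = d_true.data.shape[0]
    return 0.5 * (F.sum((d_true - b) ** 2) + F.sum((d_fake - a) ** 2)) / batchsize

def lsgan_generator_loss(d_fake, c=1.0):
    batchsize = d_fake.data.shape[0]
    return 0.5 * F.sum((d_fake - c) ** 2) / batchsize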
def main():
    # config
    discriminator_config = gan.config_discriminator
    generator_config = gan.config_generator

    # settings
    max_epoch = 200
    num_updates_per_epoch = 500
    plot_interval = 5
    batchsize_true = 100
    batchsize_fake = batchsize_true
    scale = 2.0

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # training
    progress = Progress()
    plot_samples(0, progress)
    for epoch in xrange(1, max_epoch + 1):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_critic = 0
        sum_loss_generator = 0

        for t in xrange(num_updates_per_epoch):
            for k in xrange(discriminator_config.num_critic):
                # clamp parameters to a cube
                # gan.clip_discriminator_weights()
                gan.decay_discriminator_weights()

                # sample from data distribution
                samples_true = sampler.gaussian_mixture_circle(
                    batchsize_true, generator_config.num_mixture, scale=scale, std=0.2)
                # sample from generator
                samples_fake = gan.generate_x(batchsize_true, from_gaussian=True)
                samples_fake.unchain_backward()

                fw_true, activations_true = gan.discriminate(samples_true / scale)
                fw_fake, _ = gan.discriminate(samples_fake / scale)

                loss_critic = -F.sum(fw_true - fw_fake) / batchsize_true
                sum_loss_critic += float(loss_critic.data) / discriminator_config.num_critic

                # update discriminator
                gan.backprop_discriminator(loss_critic)

            # generator loss
            samples_fake = gan.generate_x(batchsize_fake, from_gaussian=True)
            fw_fake, activations_fake = gan.discriminate(samples_fake / scale)
            loss_generator = -F.sum(fw_fake) / batchsize_fake

            # feature matching
            if discriminator_config.use_feature_matching:
                features_true = activations_true[-1]
                features_true.unchain_backward()
                if batchsize_true != batchsize_fake:
                    samples_fake = gan.generate_x(batchsize_true, from_gaussian=True)
                    _, activations_fake = gan.discriminate(samples_fake / scale)
                features_fake = activations_fake[-1]
                loss_generator += F.mean_squared_error(features_true, features_fake)

            # update generator
            gan.backprop_generator(loss_generator)
            sum_loss_generator += float(loss_generator.data)

            if t % 10 == 0:
                progress.show(t, num_updates_per_epoch, {})

        gan.save(args.model_dir)

        progress.show(num_updates_per_epoch, num_updates_per_epoch, {
            "wasserstein": -sum_loss_critic / num_updates_per_epoch,
            "loss_g": sum_loss_generator / num_updates_per_epoch,
        })

        if epoch % plot_interval == 0 or epoch == 1:
            plot_samples(epoch, progress)
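# --- Sketch: the toy data distribution ---
# sampler.gaussian_mixture_circle() is provided by the repo; a plausible implementation is a
# mixture of num_cluster Gaussians whose means lie evenly spaced on a circle of radius `scale`.
# This sketch is an assumption about the sampler, not its actual source.
import numpy as np

def gaussian_mixture_circle(batchsize, num_cluster=8, scale=2.0, std=0.2):
    indices = np.random.randint(0, num_cluster, size=batchsize)
    angles = indices * (2.0 * np.pi / num_cluster)
    means = np.stack((scale * np.cos(angles), scale * np.sin(angles)), axis=1)
    return (means + np.random.normal(0, std, size=(batchsize, 2))).astype(np.float32)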
def main():
    # load MNIST images
    images, labels = dataset.load_train_images()

    # config
    discriminator_config = gan.config_discriminator
    generator_config = gan.config_generator

    # labels
    a = discriminator_config.a
    b = discriminator_config.b
    c = discriminator_config.c

    # settings
    max_epoch = 1000
    num_updates_per_epoch = 500
    plot_interval = 5
    batchsize_true = 100
    batchsize_fake = batchsize_true

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # training
    progress = Progress()
    for epoch in xrange(1, max_epoch + 1):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_d = 0
        sum_loss_g = 0

        for t in xrange(num_updates_per_epoch):
            # sample true data from data distribution
            images_true = dataset.sample_data(images, batchsize_true, binarize=False)
            # sample fake data from generator
            images_fake = gan.generate_x(batchsize_fake)
            images_fake.unchain_backward()

            d_true = gan.discriminate(images_true, return_activations=False)
            d_fake = gan.discriminate(images_fake, return_activations=False)
            loss_d = 0.5 * (F.sum((d_true - b) ** 2) + F.sum((d_fake - a) ** 2)) / batchsize_true
            sum_loss_d += float(loss_d.data)

            # update discriminator
            gan.backprop_discriminator(loss_d)

            # generator loss
            images_fake = gan.generate_x(batchsize_fake)
            d_fake = gan.discriminate(images_fake, return_activations=False)
            loss_g = 0.5 * (F.sum((d_fake - c) ** 2)) / batchsize_fake
            sum_loss_g += float(loss_g.data)

            # update generator
            gan.backprop_generator(loss_g)

            if t % 10 == 0:
                progress.show(t, num_updates_per_epoch, {})

        gan.save(args.model_dir)

        progress.show(num_updates_per_epoch, num_updates_per_epoch, {
            "loss_d": sum_loss_d / num_updates_per_epoch,
            "loss_g": sum_loss_g / num_updates_per_epoch,
        })

        if epoch % plot_interval == 0 or epoch == 1:
            plot(filename="epoch_{}_time_{}min".format(epoch, progress.get_total_time()))
def main():
    images = load_rgb_images(args.image_dir)

    # config
    discriminator_config = gan.config_discriminator
    generator_config = gan.config_generator

    # labels
    a = discriminator_config.a
    b = discriminator_config.b
    c = discriminator_config.c

    # settings
    max_epoch = 1000
    num_updates_per_epoch = 500
    batchsize_true = 128
    batchsize_fake = 128
    plot_interval = 5

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # init weightnorm layers
    if discriminator_config.use_weightnorm:
        print "initializing weight normalization layers of the discriminator ..."
        images_true = sample_from_data(images, batchsize_true)
        gan.discriminate(images_true)
    if generator_config.use_weightnorm:
        print "initializing weight normalization layers of the generator ..."
        gan.generate_x(batchsize_fake)

    # training
    progress = Progress()
    for epoch in xrange(1, max_epoch + 1):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_d = 0
        sum_loss_g = 0

        for t in xrange(num_updates_per_epoch):
            # sample data
            images_true = sample_from_data(images, batchsize_true)
            images_fake = gan.generate_x(batchsize_true)
            images_fake.unchain_backward()

            d_true = gan.discriminate(images_true, return_activations=False)
            d_fake = gan.discriminate(images_fake, return_activations=False)
            loss_d = 0.5 * (F.sum((d_true - b) ** 2) + F.sum((d_fake - a) ** 2)) / batchsize_true
            sum_loss_d += float(loss_d.data)

            # update discriminator
            gan.backprop_discriminator(loss_d)

            # generator loss
            images_fake = gan.generate_x(batchsize_fake)
            d_fake = gan.discriminate(images_fake, return_activations=False)
            loss_g = 0.5 * (F.sum((d_fake - c) ** 2)) / batchsize_fake
            sum_loss_g += float(loss_g.data)

            # update generator
            gan.backprop_generator(loss_g)

            if t % 10 == 0:
                progress.show(t, num_updates_per_epoch, {})

        gan.save(args.model_dir)

        progress.show(num_updates_per_epoch, num_updates_per_epoch, {
            "loss_d": sum_loss_d / num_updates_per_epoch,
            "loss_g": sum_loss_g / num_updates_per_epoch,
        })

        if epoch % plot_interval == 0 or epoch == 1:
            plot(filename="epoch_{}_time_{}min".format(epoch, progress.get_total_time()))
def main():
    images = load_rgb_images(args.image_dir)

    # config
    discriminator_config = gan.config_discriminator
    generator_config = gan.config_generator

    # settings
    max_epoch = 1000
    num_updates_per_epoch = 500
    batchsize_true = 128
    batchsize_fake = 128
    plot_interval = 5

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # init weightnorm layers
    if discriminator_config.use_weightnorm:
        print "initializing weight normalization layers of the discriminator ..."
        x_true = sample_from_data(images, batchsize_true)
        gan.discriminate(x_true)
    if generator_config.use_weightnorm:
        print "initializing weight normalization layers of the generator ..."
        gan.generate_x(batchsize_fake)

    # training
    progress = Progress()
    for epoch in xrange(1, max_epoch + 1):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_critic = 0
        sum_loss_generator = 0

        learning_rate = get_learning_rate_for_epoch(epoch)
        gan.update_learning_rate(learning_rate)

        for t in xrange(num_updates_per_epoch):
            for k in xrange(discriminator_config.num_critic):
                # clamp parameters to a cube
                gan.clip_discriminator_weights()
                # gan.scale_discriminator_weights()

                # sample data
                x_true = sample_from_data(images, batchsize_true)
                x_fake = gan.generate_x(batchsize_true)
                x_fake.unchain_backward()

                fw_u, activations_u = gan.discriminate(x_true)
                fw_g, _ = gan.discriminate(x_fake)

                loss_critic = -F.sum(fw_u - fw_g) / batchsize_true
                sum_loss_critic += float(loss_critic.data) / discriminator_config.num_critic

                # update discriminator
                gan.backprop_discriminator(loss_critic)

            # generator loss
            x_fake = gan.generate_x(batchsize_fake)
            fw_g, activations_g = gan.discriminate(x_fake)
            loss_generator = -F.sum(fw_g) / batchsize_fake

            # feature matching
            if discriminator_config.use_feature_matching:
                features_true = activations_u[-1]
                features_true.unchain_backward()
                if batchsize_true != batchsize_fake:
                    x_fake = gan.generate_x(batchsize_true)
                    _, activations_g = gan.discriminate(x_fake, apply_softmax=False)
                features_fake = activations_g[-1]
                loss_generator += F.mean_squared_error(features_true, features_fake)

            # update generator
            gan.backprop_generator(loss_generator)
            sum_loss_generator += float(loss_generator.data)

            if t % 10 == 0:
                progress.show(t, num_updates_per_epoch, {})

        gan.save(args.model_dir)

        progress.show(num_updates_per_epoch, num_updates_per_epoch, {
            "wasserstein": -sum_loss_critic / num_updates_per_epoch,
            "loss_g": sum_loss_generator / num_updates_per_epoch,
            "lr": learning_rate,
        })

        if epoch % plot_interval == 0 or epoch == 1:
            plot(filename="epoch_{}_time_{}min".format(epoch, progress.get_total_time()))
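# --- Sketch: per-epoch learning rate schedule ---
# get_learning_rate_for_epoch() is defined elsewhere in the repo; a typical step-decay schedule
# could look like the placeholder below (the base rate and breakpoints are illustrative only,
# not the values used by the original script).

def get_learning_rate_for_epoch(epoch, base_lr=1e-4):
    if epoch < 100:
        return base_lr
    if epoch < 300:
        return base_lr * 0.5
    return base_lr * 0.1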
def main():
    images = load_rgb_images(args.image_dir)

    # config
    config = chainer.config

    # settings
    max_epoch = 1000
    num_updates_per_epoch = 500
    batchsize_true = 128
    batchsize_fake = 128
    plot_interval = 5

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # training
    progress = Progress()
    for epoch in xrange(1, max_epoch + 1):
        with chainer.using_config("train", True):
            progress.start_epoch(epoch, max_epoch)
            sum_loss_critic = 0
            sum_loss_generator = 0

            learning_rate = get_learning_rate_for_epoch(epoch)
            gan.update_learning_rate(learning_rate)

            for t in xrange(num_updates_per_epoch):
                for k in xrange(config.discriminator.num_critic):
                    # clamp parameters to a cube
                    gan.clip_discriminator_weights()

                    # sample data
                    x_true = sample_from_data(images, batchsize_true)
                    x_fake = gan.generate_x(batchsize_true)
                    x_fake.unchain_backward()

                    fw_u, activations_u = gan.discriminate(x_true)
                    fw_g, _ = gan.discriminate(x_fake)

                    loss_critic = -F.sum(fw_u - fw_g) / batchsize_true
                    sum_loss_critic += float(loss_critic.data) / config.discriminator.num_critic

                    # update discriminator
                    gan.backprop_discriminator(loss_critic)

                # generator loss
                x_fake = gan.generate_x(batchsize_fake)
                fw_g, activations_g = gan.discriminate(x_fake)
                loss_generator = -F.sum(fw_g) / batchsize_fake

                # update generator
                gan.backprop_generator(loss_generator)
                sum_loss_generator += float(loss_generator.data)

                if t % 10 == 0:
                    progress.show(t, num_updates_per_epoch, {})

            gan.save(args.model_dir)

            progress.show(num_updates_per_epoch, num_updates_per_epoch, {
                "wasserstein": -sum_loss_critic / num_updates_per_epoch,
                "loss_g": sum_loss_generator / num_updates_per_epoch,
                "lr": learning_rate,
            })

        with chainer.using_config("train", False):
            if epoch % plot_interval == 0 or epoch == 1:
                plot(filename="epoch_{}_time_{}min".format(epoch, progress.get_total_time()))
def main():
    images = load_rgb_images(args.image_dir)

    # config
    discriminator_config = gan.config_discriminator
    generator_config = gan.config_generator

    # settings
    max_epoch = 1000
    n_trains_per_epoch = 500
    batchsize_true = 128
    batchsize_fake = 128
    plot_interval = 5

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # init weightnorm layers
    if discriminator_config.use_weightnorm:
        print "initializing weight normalization layers of the discriminator ..."
        x_true = sample_from_data(images, batchsize_true)
        gan.discriminate(x_true)
    if generator_config.use_weightnorm:
        print "initializing weight normalization layers of the generator ..."
        gan.generate_x(batchsize_fake)

    # classification
    # 0 -> true sample
    # 1 -> generated sample
    class_true = gan.to_variable(np.zeros(batchsize_true, dtype=np.int32))
    class_fake = gan.to_variable(np.ones(batchsize_fake, dtype=np.int32))

    # training
    progress = Progress()
    for epoch in xrange(1, max_epoch):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_discriminator = 0
        sum_loss_generator = 0
        sum_loss_vat = 0

        for t in xrange(n_trains_per_epoch):
            # sample data
            x_true = sample_from_data(images, batchsize_true)
            x_fake = gan.generate_x(batchsize_fake).data  # unchain

            # train discriminator
            discrimination_true, activations_true = gan.discriminate(x_true, apply_softmax=False)
            discrimination_fake, _ = gan.discriminate(x_fake, apply_softmax=False)
            loss_discriminator = F.softmax_cross_entropy(discrimination_true, class_true) \
                + F.softmax_cross_entropy(discrimination_fake, class_fake)
            gan.backprop_discriminator(loss_discriminator)

            # virtual adversarial training
            loss_vat = 0
            if discriminator_config.use_virtual_adversarial_training:
                z = gan.sample_z(batchsize_fake)
                loss_vat = -F.sum(gan.compute_lds(z)) / batchsize_fake
                gan.backprop_discriminator(loss_vat)
                sum_loss_vat += float(loss_vat.data)

            # train generator
            x_fake = gan.generate_x(batchsize_fake)
            discrimination_fake, activations_fake = gan.discriminate(x_fake, apply_softmax=False)
            loss_generator = F.softmax_cross_entropy(discrimination_fake, class_true)

            # feature matching
            if discriminator_config.use_feature_matching:
                features_true = activations_true[-1]
                features_fake = activations_fake[-1]
                loss_generator += F.mean_squared_error(features_true, features_fake)

            gan.backprop_generator(loss_generator)

            sum_loss_discriminator += float(loss_discriminator.data)
            sum_loss_generator += float(loss_generator.data)

            if t % 10 == 0:
                progress.show(t, n_trains_per_epoch, {})

        progress.show(n_trains_per_epoch, n_trains_per_epoch, {
            "loss_d": sum_loss_discriminator / n_trains_per_epoch,
            "loss_g": sum_loss_generator / n_trains_per_epoch,
            "loss_vat": sum_loss_vat / n_trains_per_epoch,
        })
        gan.save(args.model_dir)

        if epoch % plot_interval == 0 or epoch == 1:
            plot(filename="epoch_{}_time_{}min".format(epoch, progress.get_total_time()))
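# --- Sketch: the smoothness term behind compute_lds ---
# gan.compute_lds(z) is the repo's virtual adversarial training term. Conceptually it measures
# how much the discriminator's output distribution changes under a small adversarial perturbation
# of the input; the KL divergence used for that comparison could be written as below. The
# perturbation search itself is omitted and the helper name is illustrative, not the repo's API.

def kl_categorical(p_logit, q_logit):
    # KL(softmax(p_logit) || softmax(q_logit)), averaged over the batch
    p = F.softmax(p_logit)
    kl = F.sum(p * (F.log_softmax(p_logit) - F.log_softmax(q_logit)))
    return kl / p_logit.data.shape[0]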