Example #1
0
def run_method_1():
	"""Generate and plot latent-space interpolations ("analogies").

	Samples pairs of latent vectors, linearly interpolates between each
	pair over ``num_col`` steps, decodes the mixed latents with the
	generator, and saves the resulting RGB image grid to ``args.plot_dir``.
	"""
	# config
	generator_config = gan.config_generator

	num_col = 10          # interpolation steps per row
	num_generation = 20   # number of rows in the output grid
	batchsize = 2 * num_generation  # two latent endpoints per row
	base_z = gan.sample_z(batchsize)

	# Row g blends from base_z[2g+1] (at i=0) toward base_z[2g].
	# NOTE(review): i/num_col never reaches 1, so the base_z[2g] endpoint
	# itself is excluded from the row — confirm this is intended.
	mix_z = np.zeros((num_col * num_generation, generator_config.ndim_input), dtype=np.float32)
	for g in xrange(num_generation):
		for i in xrange(num_col):
			ratio = i / float(num_col)
			mix_z[g * num_col + i] = base_z[2 * g] * ratio + base_z[2 * g + 1] * (1 - ratio)

	x_negative = gan.generate_x_from_z(mix_z, test=True, as_numpy=True)
	# map generator output from [-1, 1] to [0, 1] for plotting
	x_negative = (x_negative + 1.0) / 2.0

	# NCHW -> NHWC for the image tiler
	plot.tile_rgb_images(x_negative.transpose(0, 2, 3, 1), dir=args.plot_dir, filename="analogy_1", row=num_generation, col=num_col)
Example #2
0
def run_method_1():
	"""Generate and plot latent-space interpolations for binary images.

	Samples pairs of latent vectors, linearly interpolates between each
	pair over ``num_col`` steps, decodes the mixed latents with the
	generator, and saves the grid of 28x28 binary images (MNIST-sized)
	to ``args.plot_dir``.
	"""
	# config
	generator_config = gan.config_generator

	num_col = 20          # interpolation steps per row
	num_generation = 20   # number of rows in the output grid
	batchsize = 2 * num_generation  # two latent endpoints per row
	base_z = gan.sample_z(batchsize)

	# Row g blends from base_z[2g+1] (at i=0) toward base_z[2g].
	mix_z = np.zeros((num_col * num_generation, generator_config.ndim_input), dtype=np.float32)
	for g in xrange(num_generation):
		for i in xrange(num_col):
			ratio = i / float(num_col)
			mix_z[g * num_col + i] = base_z[2 * g] * ratio + base_z[2 * g + 1] * (1 - ratio)

	x_negative = gan.generate_x_from_z(mix_z, test=True, as_numpy=True)
	# flatten each sample to a 28x28 image for the binary tiler
	plot.tile_binary_images(x_negative.reshape((-1, 28, 28)), dir=args.plot_dir, filename="analogy_1", row=num_generation, col=num_col)
Example #3
0
def run_method_1():
    """Optimize latent vectors toward the "true" class, then plot analogies.

    First nudges the sampled latent batch by gradient ascent so the
    discriminator classifies the decoded images as real (class 0), then
    linearly interpolates between pairs of the optimized latents and
    saves the resulting RGB image grid to ``args.plot_dir``.

    Raises:
        Exception: if the optimized latents contain NaN.
    """
    # config
    generator_config = gan.config_generator

    num_col = 10          # interpolation steps per row
    num_generation = 20   # number of rows in the output grid
    batchsize = 2 * num_generation  # two latent endpoints per row
    base_z = gan.to_variable(gan.sample_z(batchsize))

    # optimize z: push generated samples toward discriminator class 0 ("true")
    class_true = gan.to_variable(np.zeros(batchsize, dtype=np.int32))
    for n in xrange(5):
        x_fake = gan.generate_x_from_z(base_z, test=True, as_numpy=False)
        discrimination_fake, _ = gan.discriminate(x_fake,
                                                  apply_softmax=False,
                                                  test=True)
        cross_entropy = F.softmax_cross_entropy(discrimination_fake,
                                                class_true)
        gan.backprop_generator(cross_entropy)
        # gradient-ascent step on z (step size 0.01)
        base_z = gan.to_variable(base_z.data + base_z.grad * 0.01)
    base_z = gan.to_numpy(base_z)
    # abort early if the optimization diverged
    if np.isnan(np.sum(base_z)):
        raise Exception("NaN")

    # Row g blends from base_z[2g+1] (at i=0) toward base_z[2g].
    mix_z = np.zeros((num_col * num_generation, generator_config.ndim_input),
                     dtype=np.float32)
    for g in xrange(num_generation):
        for i in xrange(num_col):
            ratio = i / float(num_col)
            mix_z[g * num_col + i] = (base_z[2 * g] * ratio +
                                      base_z[2 * g + 1] * (1 - ratio))

    x_negative = gan.generate_x_from_z(mix_z, test=True, as_numpy=True)
    # map generator output from [-1, 1] to [0, 1] for plotting
    x_negative = (x_negative + 1.0) / 2.0
    # NCHW -> NHWC for the image tiler
    visualizer.tile_rgb_images(x_negative.transpose(0, 2, 3, 1),
                               dir=args.plot_dir,
                               filename="analogy_1",
                               row=num_generation,
                               col=num_col)
Example #4
0
def main():
    images = load_rgb_images(args.image_dir)

    # config
    discriminator_config = gan.config_discriminator
    generator_config = gan.config_generator

    # settings
    max_epoch = 1000
    n_trains_per_epoch = 500
    batchsize_true = 128
    batchsize_fake = 128
    plot_interval = 5

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # init weightnorm layers
    if discriminator_config.use_weightnorm:
        print "initializing weight normalization layers of the discriminator ..."
        x_true = sample_from_data(images, batchsize_true)
        gan.discriminate(x_true)

    if generator_config.use_weightnorm:
        print "initializing weight normalization layers of the generator ..."
        gan.generate_x(batchsize_fake)

    # classification
    # 0 -> true sample
    # 1 -> generated sample
    class_true = gan.to_variable(np.zeros(batchsize_true, dtype=np.int32))
    class_fake = gan.to_variable(np.ones(batchsize_fake, dtype=np.int32))

    # training
    progress = Progress()
    for epoch in xrange(1, max_epoch):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_discriminator = 0
        sum_loss_generator = 0
        sum_loss_vat = 0

        for t in xrange(n_trains_per_epoch):
            # sample data
            x_true = sample_from_data(images, batchsize_true)
            x_fake = gan.generate_x(batchsize_fake).data  # unchain

            # train discriminator
            discrimination_true, activations_true = gan.discriminate(
                x_true, apply_softmax=False)
            discrimination_fake, _ = gan.discriminate(x_fake,
                                                      apply_softmax=False)
            loss_discriminator = F.softmax_cross_entropy(
                discrimination_true, class_true) + F.softmax_cross_entropy(
                    discrimination_fake, class_fake)
            gan.backprop_discriminator(loss_discriminator)

            # virtual adversarial training
            loss_vat = 0
            if discriminator_config.use_virtual_adversarial_training:
                z = gan.sample_z(batchsize_fake)
                loss_vat = -F.sum(gan.compute_lds(z)) / batchsize_fake
                gan.backprop_discriminator(loss_vat)
                sum_loss_vat += float(loss_vat.data)

            # train generator
            x_fake = gan.generate_x(batchsize_fake)
            discrimination_fake, activations_fake = gan.discriminate(
                x_fake, apply_softmax=False)
            loss_generator = F.softmax_cross_entropy(discrimination_fake,
                                                     class_true)

            # feature matching
            if discriminator_config.use_feature_matching:
                features_true = activations_true[-1]
                features_fake = activations_fake[-1]
                loss_generator += F.mean_squared_error(features_true,
                                                       features_fake)

            gan.backprop_generator(loss_generator)

            sum_loss_discriminator += float(loss_discriminator.data)
            sum_loss_generator += float(loss_generator.data)
            if t % 10 == 0:
                progress.show(t, n_trains_per_epoch, {})

        progress.show(
            n_trains_per_epoch, n_trains_per_epoch, {
                "loss_d": sum_loss_discriminator / n_trains_per_epoch,
                "loss_g": sum_loss_generator / n_trains_per_epoch,
                "loss_vat": sum_loss_vat / n_trains_per_epoch,
            })
        gan.save(args.model_dir)

        if epoch % plot_interval == 0 or epoch == 1:
            plot(filename="epoch_{}_time_{}min".format(
                epoch, progress.get_total_time()))