Example #1
def main():
    num_samples = 10000
    samples_true = sampler.gaussian_mixture_circle(
        num_samples,
        num_cluster=generator_params["config"]["num_mixture"],
        scale=2,
        std=0.2)
    plot_scatter(samples_true, args.plot_dir, "scatter_true")
    plot_kde(samples_true, args.plot_dir, "kde_true")
    samples_fake = gan.to_numpy(gan.generate_x(num_samples, test=True))
    plot_scatter(samples_fake, args.plot_dir, "scatter_gen")
    plot_kde(samples_fake, args.plot_dir, "kde_gen")
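Every example on this page draws its real data from sampler.gaussian_mixture_circle, whose source is not shown. The following is a minimal sketch of how such a sampler is commonly written (component means evenly spaced on a circle of radius scale, isotropic Gaussian noise of width std); the exact implementation behind these examples may differ.

import numpy as np

def gaussian_mixture_circle(batchsize, num_cluster=8, scale=2, std=0.2):
    # Choose a mixture component per sample, place the component means
    # evenly on a circle of radius `scale`, then add isotropic noise.
    indices = np.random.randint(0, num_cluster, size=batchsize)
    angle = indices * (2 * np.pi / num_cluster)
    mean = np.empty((batchsize, 2), dtype=np.float32)
    mean[:, 0] = np.cos(angle) * scale
    mean[:, 1] = np.sin(angle) * scale
    noise = np.random.normal(0, std, size=(batchsize, 2)).astype(np.float32)
    return mean + noise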
Example #2
def main():
    num_samples = 10000
    samples_true = sampler.gaussian_mixture_circle(num_samples,
                                                   num_cluster=8,
                                                   scale=2,
                                                   std=0.2)
    samples_true2 = sampler.gaussian_mixture_double_circle(num_samples,
                                                           num_cluster=8,
                                                           scale=2,
                                                           std=0.2)
    plot_scatter(samples_true, "./results", "scatter_true")
    plot_kde(samples_true, "./results", "kde_true")
    plot_scatter(samples_true2, "./results", "scatter_true2")
    plot_kde(samples_true2, "./results", "kde_true2")
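Example #2 additionally uses sampler.gaussian_mixture_double_circle. Building on the gaussian_mixture_circle sketch above, a plausible (assumed) implementation places the samples on two concentric rings:

def gaussian_mixture_double_circle(batchsize, num_cluster=8, scale=2, std=0.2):
    # Same mixture as gaussian_mixture_circle, but roughly half of the
    # samples are pulled onto an inner circle of radius `scale / 2`
    # (this also halves their noise width, acceptable for a toy sampler).
    samples = gaussian_mixture_circle(batchsize, num_cluster, scale, std)
    inner = np.random.rand(batchsize) < 0.5
    samples[inner] *= 0.5
    return samples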
Example #3
def main():
    # settings
    max_epoch = 200
    num_updates_per_epoch = 500
    plot_interval = 5
    batchsize = 16
    scale = 2.0
    config = began.config

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # training
    kt = 0
    lambda_k = 0.001
    progress = Progress()
    plot_generator(0, progress)
    for epoch in xrange(1, max_epoch + 1):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_d = 0
        sum_loss_g = 0
        sum_M = 0

        for t in xrange(num_updates_per_epoch):
            # sample data
            samples_real = sampler.gaussian_mixture_circle(batchsize,
                                                           config.num_mixture,
                                                           scale=scale,
                                                           std=0.2)
            samples_fake = began.generate_x(batchsize)

            loss_real = began.compute_loss(samples_real)
            loss_fake = began.compute_loss(samples_fake)

            loss_d = loss_real - kt * loss_fake
            loss_g = loss_fake

            began.backprop_discriminator(loss_d)
            began.backprop_generator(loss_g)

            loss_d = float(loss_d.data)
            loss_g = float(loss_g.data)
            loss_real = float(loss_real.data)
            loss_fake = float(loss_fake.data)

            sum_loss_d += loss_d
            sum_loss_g += loss_g

            # update control parameters
            kt += lambda_k * (config.gamma * loss_real - loss_fake)
            kt = max(0, min(1, kt))
            M = loss_real + abs(config.gamma * loss_real - loss_fake)
            sum_M += M

            if t % 10 == 0:
                progress.show(t, num_updates_per_epoch, {})

        began.save(args.model_dir)

        progress.show(
            num_updates_per_epoch, num_updates_per_epoch, {
                "loss_d": sum_loss_d / num_updates_per_epoch,
                "loss_g": sum_loss_g / num_updates_per_epoch,
                "k": kt,
                "M": sum_M / num_updates_per_epoch,
            })

        if epoch % plot_interval == 0 or epoch == 1:
            plot_generator(epoch, progress)
            plot_reconstruction(
                epoch, progress,
                sampler.gaussian_mixture_circle(10000,
                                                config.num_mixture,
                                                scale=scale,
                                                std=0.2))
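Example #3 is a BEGAN training loop. The control-parameter lines implement the proportional control rule from the BEGAN paper: kt is nudged by lambda_k * (gamma * loss_real - loss_fake) and clamped to [0, 1] so that the weight on the fake-sample loss tracks the diversity ratio gamma, and M = loss_real + |gamma * loss_real - loss_fake| is the paper's convergence measure. Restated in isolation (gamma = 0.5 is an assumed default; the example reads it from config.gamma):

def update_began_control(kt, loss_real, loss_fake, gamma=0.5, lambda_k=0.001):
    # Proportional control: raise kt when the generator lags behind
    # (gamma * loss_real > loss_fake), lower it otherwise.
    kt += lambda_k * (gamma * loss_real - loss_fake)
    kt = max(0.0, min(1.0, kt))
    # Convergence measure: small when losses are low and balanced.
    M = loss_real + abs(gamma * loss_real - loss_fake)
    return kt, M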
Example #4
def main():
    # config
    discriminator_config = gan.config_discriminator
    generator_config = gan.config_generator

    # settings
    max_epoch = 200
    num_updates_per_epoch = 500
    plot_interval = 5
    batchsize_true = 100
    batchsize_fake = batchsize_true
    scale = 2.0

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # training
    progress = Progress()
    plot_samples(0, progress)
    for epoch in xrange(1, max_epoch + 1):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_critic = 0
        sum_loss_generator = 0

        for t in xrange(num_updates_per_epoch):

            for k in xrange(discriminator_config.num_critic):
                # clamp parameters to a cube
                # gan.clip_discriminator_weights()
                gan.decay_discriminator_weights()

                # sample from data distribution
                samples_true = sampler.gaussian_mixture_circle(
                    batchsize_true,
                    generator_config.num_mixture,
                    scale=scale,
                    std=0.2)
                # sample from generator
                samples_fake = gan.generate_x(batchsize_true,
                                              from_gaussian=True)
                samples_fake.unchain_backward()

                fw_true, activations_true = gan.discriminate(samples_true /
                                                             scale)
                fw_fake, _ = gan.discriminate(samples_fake / scale)

                loss_critic = -F.sum(fw_true - fw_fake) / batchsize_true
                sum_loss_critic += float(
                    loss_critic.data) / discriminator_config.num_critic

                # update discriminator
                gan.backprop_discriminator(loss_critic)

            # generator loss
            samples_fake = gan.generate_x(batchsize_fake, from_gaussian=True)
            fw_fake, activations_fake = gan.discriminate(samples_fake / scale)
            loss_generator = -F.sum(fw_fake) / batchsize_fake

            # feature matching
            if discriminator_config.use_feature_matching:
                features_true = activations_true[-1]
                features_true.unchain_backward()
                if batchsize_true != batchsize_fake:
                    samples_fake = gan.generate_x(batchsize_true,
                                                  from_gaussian=True)
                    _, activations_fake = gan.discriminate(samples_fake /
                                                           scale)
                features_fake = activations_fake[-1]
                loss_generator += F.mean_squared_error(features_true,
                                                       features_fake)

            # update generator
            gan.backprop_generator(loss_generator)
            sum_loss_generator += float(loss_generator.data)

            if t % 10 == 0:
                progress.show(t, num_updates_per_epoch, {})

        gan.save(args.model_dir)

        progress.show(
            num_updates_per_epoch, num_updates_per_epoch, {
                "wasserstein": -sum_loss_critic / num_updates_per_epoch,
                "loss_g": sum_loss_generator / num_updates_per_epoch,
            })

        if epoch % plot_interval == 0 or epoch == 1:
            plot_samples(epoch, progress)
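Example #4 is a WGAN critic/generator loop. Note that the original weight clipping is commented out in favor of gan.decay_discriminator_weights(); neither method's body is shown. For reference, the clipping variant from the WGAN paper is usually a plain clamp of every critic parameter into a small cube, roughly like this Chainer-style sketch (the model argument and the clip bound c = 0.01 are assumptions):

import chainer

def clip_discriminator_weights(model, c=0.01):
    # Crudely enforce the critic's Lipschitz constraint by clamping every
    # parameter to [-c, c] after each update (WGAN weight clipping).
    for param in model.params():
        xp = chainer.cuda.get_array_module(param.data)
        param.data = xp.clip(param.data, -c, c)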
Example #5
def main():
    # config
    discriminator_config = gan.config_discriminator
    generator_config = gan.config_generator

    # labels
    a = discriminator_config.a
    b = discriminator_config.b
    c = discriminator_config.c

    # settings
    max_epoch = 200
    num_updates_per_epoch = 500
    plot_interval = 5
    batchsize_true = 100
    batchsize_fake = batchsize_true
    scale = 2.0

    # seed
    np.random.seed(args.seed)
    if args.gpu_device != -1:
        cuda.cupy.random.seed(args.seed)

    # training
    progress = Progress()
    plot_samples(0, progress)
    for epoch in xrange(1, max_epoch + 1):
        progress.start_epoch(epoch, max_epoch)
        sum_loss_d = 0
        sum_loss_g = 0

        for t in xrange(num_updates_per_epoch):
            # sample from data distribution
            samples_true = sampler.gaussian_mixture_circle(
                batchsize_true,
                generator_config.num_mixture,
                scale=scale,
                std=0.2)
            # sample from generator
            samples_fake = gan.generate_x(batchsize_true, from_gaussian=True)
            samples_fake.unchain_backward()

            d_true = gan.discriminate(samples_true / scale,
                                      return_activations=False)
            d_fake = gan.discriminate(samples_fake / scale,
                                      return_activations=False)

            loss_d = 0.5 * (F.sum((d_true - b)**2) + F.sum(
                (d_fake - a)**2)) / batchsize_true
            sum_loss_d += float(loss_d.data)

            # update discriminator
            gan.backprop_discriminator(loss_d)

            # generator loss
            samples_fake = gan.generate_x(batchsize_fake, from_gaussian=True)
            d_fake = gan.discriminate(samples_fake / scale,
                                      return_activations=False)
            loss_g = 0.5 * (F.sum((d_fake - c)**2)) / batchsize_fake
            sum_loss_g += float(loss_g.data)

            # update generator
            gan.backprop_generator(loss_g)

            if t % 10 == 0:
                progress.show(t, num_updates_per_epoch, {})

        gan.save(args.model_dir)

        progress.show(
            num_updates_per_epoch, num_updates_per_epoch, {
                "loss_d": sum_loss_d / num_updates_per_epoch,
                "loss_g": sum_loss_g / num_updates_per_epoch,
            })

        if epoch % plot_interval == 0 or epoch == 1:
            plot_samples(epoch, progress)
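Example #5 is an LSGAN update. Here a, b and c are the least-squares target labels: a for fake samples, b for real samples, and c for the value the generator wants the discriminator to assign to fakes (the LSGAN paper suggests a=0, b=1, c=1, or a=-1, b=1, c=0; the actual values come from the discriminator config, which is not shown). The code computes exactly the paper's objectives, loss_d = 1/2 * E[(D(x) - b)^2] + 1/2 * E[(D(G(z)) - a)^2] for the discriminator and loss_g = 1/2 * E[(D(G(z)) - c)^2] for the generator, with the expectations taken as batch means.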
Example #6
import torch
from torch import optim
from torch.autograd import Variable

import sampler
from progress import Progress
# DisNet, GenNet and the `qn` config loader are project modules not shown here.

prog = Progress()
config = qn.load('hyperparams.yml')
batchsize = config['batchsize']
dis_net = DisNet(config['dim_x'])
gen_net = GenNet(config['dim_z'], config['dim_x'])

dis_optim = optim.RMSprop(dis_net.parameters(), lr=config['dis_lr'])
gen_optim = optim.Adam(gen_net.parameters(), lr=config['gen_lr'])

for i in range(config['num_updates']):
    for _ in range(config['num_critic']):
        samples_true = sampler.gaussian_mixture_circle(batchsize,
                                                       config['num_mixture'],
                                                       scale=config['scale'],
                                                       std=config['std'])
        samples_true /= config['scale']
        samples_true = Variable(torch.from_numpy(samples_true))
        z = sampler.sample_z(config['dim_z'],
                             batchsize,
                             gaussian=config['gaussian'])
        z = Variable(torch.from_numpy(z))
        samples_fake = gen_net(z).detach()
        samples_fake /= config['scale']

        f_true = dis_net(samples_true)
        f_fake = dis_net(samples_fake)
        loss_critic = f_fake.mean() - f_true.mean()
        prog.add_loss_critic(loss_critic)
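The snippet stops before the critic is actually updated. A typical WGAN continuation at this point in the inner loop, under the same assumptions about dis_net and dis_optim, would backpropagate loss_critic, step the optimizer, and clamp the critic weights:

        # continuation sketch (not in the original snippet): critic step
        dis_optim.zero_grad()
        loss_critic.backward()
        dis_optim.step()
        for p in dis_net.parameters():
            p.data.clamp_(-0.01, 0.01)  # WGAN weight clipping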
Example #7
def main():
    parser = argparse.ArgumentParser(
        description='Learning cumulative distribution functions '
        'with Monotonic Networks')
    parser.add_argument(
        '--dataset',
        '-d',
        default='gaussian_1d',
        help='The dataset to use: gaussian_1d, gaussian_mix_2d, '
        'gaussian_half_1d or half_gaussian_2d')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=128,
                        help='Number of samples in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=100,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    if args.dataset == 'gaussian_1d':
        train = sampler.gaussian_1d(numpy, 4096)
        test = sampler.gaussian_1d(numpy, 1024)
    elif args.dataset == 'gaussian_mix_2d':
        train = sampler.gaussian_mixture_circle(numpy, 32768)
        test = sampler.gaussian_mixture_circle(numpy, 1024)
    elif args.dataset == 'gaussian_half_1d':
        train = sampler.half_gaussian_1d(numpy, 16384)
        test = sampler.half_gaussian_1d(numpy, 1024)
    elif args.dataset == 'half_gaussian_2d':
        train = sampler.truncated_gaussian_circle(numpy, 32768)
        test = sampler.truncated_gaussian_circle(numpy, 1024)
    else:
        raise RuntimeError('Invalid dataset: {}.'.format(args.dataset))

    if train.shape[1] == 1:
        model = models.ProbabilityDistributionNetwork(1, [16, 16, 16],
                                                      [16, 16], 4)
    elif train.shape[1] == 2:
        model = models.ProbabilityDistributionNetwork(2, [32, 32, 32],
                                                      [32, 32], 8)
    else:
        raise RuntimeError('Invalid dataset.')

    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test, args.batchsize, False,
                                                 False)

    stop_trigger = (args.epoch, 'epoch')

    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, stop_trigger, out=args.out)
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

    trainer.extend(
        extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}'),
        trigger=(10, 'epoch'))
    trainer.extend(extensions.LogReport())
    trainer.extend(
        extensions.PrintReport(
            ['epoch', 'main/loss', 'validation/main/loss', 'elapsed_time']))
    trainer.extend(visualize.Visualize(model, test))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
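Assuming this script is saved as train.py (the file name is not given here), a typical CPU run on the 2-D mixture dataset would be:

python train.py --dataset gaussian_mix_2d --gpu=-1 --epoch 100 --out result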
Example #8
def main():
    num_samples = 10000
    samples_true = sampler.gaussian_mixture_circle(
        num_samples, num_cluster=args.num_mixture, scale=2, std=0.2)
    plot_scatter(samples_true, args.plot_dir, "scatter_true")
    plot_kde(samples_true, args.plot_dir, "kde_true")