Example #1
File: train.py  Project: musyoku/began
def plot_generator(epoch, progress):
    x = began.generate_x(10000, test=True)
    x.unchain_backward()
    x = began.to_numpy(x)
    try:
        plot_scatter(x,
                     dir=args.plot_dir,
                     filename="generator_scatter_epoch_{}_time_{}min".format(
                         epoch, progress.get_total_time()))
        plot_kde(x,
                 dir=args.plot_dir,
                 filename="generator_kde_epoch_{}_time_{}min".format(
                     epoch, progress.get_total_time()))
    except:
        pass
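
All of the examples on this page call plot_scatter and plot_kde, which are project-local helpers whose definitions are not shown. The following is a minimal, hypothetical sketch of what such helpers might look like (matplotlib/seaborn based; the signatures mirror the calls in these examples, including the show flag used in Example #6) and is not the actual implementation from any of these projects:

import os

import matplotlib.pyplot as plt
import seaborn as sns


def plot_scatter(x, dir=".", filename="scatter", show=False):
    # x: (N, 2) array of 2-D samples
    os.makedirs(dir, exist_ok=True)
    plt.figure()
    plt.scatter(x[:, 0], x[:, 1], s=2, alpha=0.3)
    plt.savefig(os.path.join(dir, "{}.png".format(filename)))
    if show:
        plt.show()
    plt.close()


def plot_kde(x, dir=".", filename="kde", show=False):
    # kernel-density estimate of the same 2-D samples
    os.makedirs(dir, exist_ok=True)
    plt.figure()
    sns.kdeplot(x=x[:, 0], y=x[:, 1], fill=True)
    plt.savefig(os.path.join(dir, "{}.png".format(filename)))
    if show:
        plt.show()
    plt.close()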
Example #2
def plot_samples(epoch, progress):
    samples_fake = gan.generate_x(10000, from_gaussian=True)
    samples_fake.unchain_backward()
    samples_fake = gan.to_numpy(samples_fake)
    try:
        plot_scatter(samples_fake,
                     dir=args.plot_dir,
                     filename="scatter_epoch_{}_time_{}min".format(
                         epoch, progress.get_total_time()))
        plot_kde(samples_fake,
                 dir=args.plot_dir,
                 filename="kde_epoch_{}_time_{}min".format(
                     epoch, progress.get_total_time()))
    except:
        pass
Example #3
File: train.py  Project: musyoku/began
def plot_reconstruction(epoch, progress, x):
    z = began.encode(x, test=True)
    x = began.decode(z, test=True)
    x.unchain_backward()
    x = began.to_numpy(x)
    try:
        plot_scatter(
            x,
            dir=args.plot_dir,
            filename="reconstruction_scatter_epoch_{}_time_{}min".format(
                epoch, progress.get_total_time()))
        plot_kde(x,
                 dir=args.plot_dir,
                 filename="reconstruction_kde_epoch_{}_time_{}min".format(
                     epoch, progress.get_total_time()))
    except:
        pass
Example #4
        z = Variable(Tensor(np.random.normal(0, 1, (opt.batchSize, opt.nz)).astype(np.float32))).to(device)

        optimizer_G.zero_grad()

        # Generate a batch of images
        samples_fake = generator(z)
        # Adversarial loss
        lossG = - torch.sum(discriminator(samples_fake / opt.scale) / opt.batchSize)
        lossG.backward()
        optimizer_G.step()

        # ====================
        # Save to TensorBoard
        # ====================
        if i == 0 or i % opt.n_display_step == 0:
            board_train.add_scalar('Generator/loss_G', lossG.item(), iterations)
            board_train.add_scalar('Discriminator/loss_D', lossD.item(), iterations)
            # Monitor training progress
            print("epoch={}, iters={}, loss_G={:.5f}, loss_D={:.5f}".format(epoch, iterations, lossG.item(), lossD.item()))
        
    # ============
    # Save images
    # ============
    generator.eval()
    z_fixed = Variable(Tensor(np.random.normal(0, 1, (10000, opt.nz)).astype(np.float32))).to(device)
    with torch.no_grad():
        samples_fake = generator(z_fixed)

    plot_scatter(samples_fake.cpu().numpy(), dir=os.path.join(opt.dir_out, opt.exper_name), filename="scatter_epoches{}".format(epoch))
    plot_kde(samples_fake.cpu().numpy(), dir=os.path.join(opt.dir_out, opt.exper_name), filename="kde_epoches{}".format(epoch))
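
The generator loss in Example #4, -torch.sum(discriminator(samples_fake / opt.scale) / opt.batchSize), is the same WGAN-style objective written as -f_fake.mean() in Examples #5 and #6: summing the critic scores and dividing by the batch size is just an explicit mean. A minimal, self-contained check of that equivalence (toy tensors, hypothetical batch size):

import torch

batch_size = 64
scores = torch.randn(batch_size, 1)          # stands in for discriminator(samples_fake)
loss_sum = -torch.sum(scores / batch_size)   # Example #4 formulation
loss_mean = -torch.mean(scores)              # Example #5 / #6 formulation
assert torch.allclose(loss_sum, loss_mean)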
Example #5
File: train.py  Project: qnduan/wgan-scrna
    prog.add_loss_dis()
    z = sampler.sample_z(config['dim_z'],
                         batchsize,
                         gaussian=config['gaussian'])
    z = Variable(torch.from_numpy(z))
    samples_fake = gen_net(z)
    samples_fake /= config['scale']
    f_fake = dis_net(samples_fake)
    loss_gen = -f_fake.mean()
    prog.add_loss_gen(loss_gen)

    gen_optim.zero_grad()
    loss_gen.backward()
    gen_optim.step()

    if (i + 1) % config['num_plot'] == 0:
        print(i + 1)
        z = sampler.sample_z(config['dim_z'],
                             10000,
                             gaussian=config['gaussian'])
        z = Variable(torch.from_numpy(z))
        samples_fake = gen_net(z).data.numpy()
        plot.plot_scatter(samples_fake,
                          dir='plot',
                          filename='{}_scatter'.format(i + 1))
        plot.plot_kde(samples_fake,
                      dir='plot',
                      filename='{}_kde'.format(i + 1))
prog.plot()
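
Example #5 above and Example #6 below also depend on a project-specific sampler.sample_z helper that is not shown. A minimal sketch, under the assumption that it returns a (batchsize, dim_z) float32 array of either Gaussian or uniform noise (the actual shape and distribution may differ in qnduan/wgan-scrna):

import numpy as np


def sample_z(dim_z, batchsize, gaussian=True):
    # float32 so torch.from_numpy() yields the FloatTensor the networks expect
    if gaussian:
        z = np.random.normal(0.0, 1.0, size=(batchsize, dim_z))
    else:
        z = np.random.uniform(-1.0, 1.0, size=(batchsize, dim_z))
    return z.astype(np.float32)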
Example #6
        loss_critic.backward()
        dis_optim.step()

    prog.add_loss_dis()
    z = sampler.sample_z(config['dim_z'],
                         batchsize,
                         gaussian=config['gaussian'])
    z = Variable(torch.from_numpy(z).cuda())
    samples_fake = gen_net(z)
    samples_fake /= config['scale']
    f_fake = dis_net(samples_fake)
    loss_gen = -f_fake.mean()
    prog.add_loss_gen(loss_gen.data.cpu().numpy()[0])

    gen_optim.zero_grad()
    loss_gen.backward()
    gen_optim.step()

    if (i + 1) % config['num_plot'] == 0:
        print(i + 1)
        z = sampler.sample_z(config['dim_z'],
                             10000,
                             gaussian=config['gaussian'])
        z = Variable(torch.from_numpy(z).cuda())
        samples_fake = gen_net(z).data.cpu().numpy()
        plot.plot_scatter(samples_fake,
                          filename='{}_scatter'.format(i + 1),
                          show=True)
        plot.plot_kde(samples_fake, filename='{}_kde'.format(i + 1), show=True)
prog.plot()