Example #1 — WGAN-GP critic and generator update (gradient penalty)
        # samples_mid: random interpolation between real and fake samples
        # (constructed as in Example #3 below)
        samples_mid = Variable(torch.from_numpy(samples_mid_numpy).cuda(),
                               requires_grad=True)
        f_mid = dis_net(samples_mid)
        # grad() returns a tuple, hence the [0] below
        # grad_outputs=torch.ones(...): gradients can only be implicitly
        # created for scalar outputs, and f_mid is a batch of critic values
        grad_mid = grad(f_mid,
                        samples_mid,
                        torch.ones(f_mid.size()).cuda(),
                        create_graph=True)[0]
        grad_mid_norm = grad_mid.norm(dim=1)
        # squared deviation of each sample's gradient norm from 1 (the penalty term)
        diff = (grad_mid_norm - 1).pow(2)

        f_true = dis_net(samples_true)
        f_fake = dis_net(samples_fake)
        loss_critic = f_fake.mean() - f_true.mean() + \
            (config['lda']*diff).mean()
        prog.add_loss_critic(loss_critic.data.cpu().numpy()[0])

        dis_optim.zero_grad()
        loss_critic.backward()
        dis_optim.step()

    prog.add_loss_dis()
    z = sampler.sample_z(config['dim_z'],
                         batchsize,
                         gaussian=config['gaussian'])
    z = Variable(torch.from_numpy(z).cuda())
    samples_fake = gen_net(z)
    samples_fake /= config['scale']
    f_fake = dis_net(samples_fake)
    loss_gen = -f_fake.mean()
    prog.add_loss_gen(loss_gen.data.cpu().numpy()[0])
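The head of this snippet is cut off: samples_mid is a random interpolation between the real and fake batches, constructed as in Example #3 below. A minimal self-contained sketch of the same gradient-penalty computation on the modern tensor API; the linear critic, batch shapes, and variable names here are stand-ins, not this code:

import torch
from torch.autograd import grad

critic = torch.nn.Linear(2, 1)       # stand-in for dis_net
samples_true = torch.randn(64, 2)    # stand-in real batch
samples_fake = torch.randn(64, 2)    # stand-in fake batch

# One interpolation coefficient per sample, broadcast across features.
eps = torch.rand(64, 1)
samples_mid = (eps * samples_true + (1 - eps) * samples_fake).requires_grad_(True)

f_mid = critic(samples_mid)
# grad() returns a tuple; grad_outputs supplies the ones vector a non-scalar
# output needs; create_graph=True lets the penalty itself be differentiated
# later during the critic's backward pass.
grad_mid = grad(f_mid, samples_mid,
                grad_outputs=torch.ones_like(f_mid),
                create_graph=True)[0]
penalty = ((grad_mid.norm(2, dim=1) - 1) ** 2).mean()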
Example #2 — WGAN critic and generator update (weight clipping)
                                                       config['num_mixture'],
                                                       scale=config['scale'],
                                                       std=config['std'])
        samples_true /= config['scale']
        samples_true = Variable(torch.from_numpy(samples_true))
        z = sampler.sample_z(config['dim_z'],
                             batchsize,
                             gaussian=config['gaussian'])
        z = Variable(torch.from_numpy(z))
        samples_fake = gen_net(z).detach()
        samples_fake /= config['scale']

        f_true = dis_net(samples_true)
        f_fake = dis_net(samples_fake)
        loss_critic = f_fake.mean() - f_true.mean()
        prog.add_loss_critic(loss_critic)

        dis_optim.zero_grad()
        loss_critic.backward()
        dis_optim.step()
        dis_net.clip()

    prog.add_loss_dis()
    z = sampler.sample_z(config['dim_z'],
                         batchsize,
                         gaussian=config['gaussian'])
    z = Variable(torch.from_numpy(z))
    samples_fake = gen_net(z)
    samples_fake /= config['scale']
    f_fake = dis_net(samples_fake)
    loss_gen = -f_fake.mean()
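dis_net.clip() is a custom method that the snippet does not show. In weight-clipping WGAN it conventionally clamps every critic parameter to a small interval after each update, which is what keeps the critic roughly Lipschitz. A hedged sketch of such a method; the class layout and the bound c=0.01 (the value from the WGAN paper) are assumptions, not this repository's code:

import torch.nn as nn

class Critic(nn.Module):             # hypothetical stand-in for dis_net's class
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(2, 128), nn.ReLU(), nn.Linear(128, 1))

    def forward(self, x):
        return self.net(x)

    def clip(self, c=0.01):
        # Clamp every parameter in-place to [-c, c] after each critic step.
        for p in self.parameters():
            p.data.clamp_(-c, c)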
Example #3 — WGAN-GP update using cuda()/cpu() helper wrappers
        samples_mid_numpy = (samples_true_numpy.T*eps1).T + \
            (samples_fake_numpy.T*eps2).T
        samples_mid = Variable(cuda(torch.from_numpy(samples_mid_numpy)),
                               requires_grad=True)
        f_mid = dis_net(samples_mid)
        # grad() returns a tuple, hence the [0] below
        # grad_outputs=cuda(torch.ones(...)): gradients can only be
        # implicitly created for scalar outputs
        grad_mid = grad(f_mid, samples_mid, cuda(torch.ones(f_mid.size())),
                        create_graph=True)[0]
        grad_mid_norm = grad_mid.norm(dim=1)
        diff = (grad_mid_norm - 1).pow(2)

        f_true = dis_net(samples_true)
        f_fake = dis_net(samples_fake)
        loss_critic = f_fake.mean() - f_true.mean() + \
            (config['lda']*diff).mean()
        prog.add_loss_critic(cpu(loss_critic.data).numpy()[0])

        dis_optim.zero_grad()
        loss_critic.backward()
        dis_optim.step()

    prog.add_loss_dis()
    z = sampler.sample_z(batchsize, config['dim_z'])
    z = Variable(cuda(torch.from_numpy(z)))
    samples_fake = gen_net(z)
    f_fake = dis_net(samples_fake)
    loss_gen = -f_fake.mean()
    prog.add_loss_gen(cpu(loss_gen.data).numpy()[0])

    gen_optim.zero_grad()
    loss_gen.backward()
    gen_optim.step()
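All three examples use the pre-0.4 PyTorch API: autograd.Variable wrappers around tensors and loss.data.cpu().numpy()[0] to read out a scalar loss. On PyTorch 0.4 and later the Variable wrapper is redundant and the scalar read is loss.item(); a minimal sketch of the migrated idiom (shapes and the loss are stand-ins):

import numpy as np
import torch

z_numpy = np.random.randn(64, 2).astype(np.float32)  # stand-in for sampler output
z = torch.from_numpy(z_numpy)        # no Variable wrapper needed on >= 0.4
loss = z.pow(2).mean()               # stand-in scalar loss
value = loss.item()                  # replaces loss.data.cpu().numpy()[0]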