Example #1
    for batch_idx, (data, target) in enumerate(train_loader, 0):
        netD.zero_grad()
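        # reuse the pre-allocated buffers: copy the batch into X, flatten it to
        # (mb_size, x_dim), resample z ~ N(0, 1) and resize the label buffer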
        mb_size = data.size(0)
        X.data.resize_(data.size()).copy_(data)
        X.data.resize_(mb_size, x_dim)
        z.data.resize_(mb_size, z_dim).normal_(0, 1)
        label.data.resize_(mb_size)

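        # generator forward pass: shared trunk first, then the independent heads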
        G_share_sample = netG_share(z)
        G_indep_sample = create_netG_indeps_sample(netG_indeps, G_share_sample)

        ############################
        # (1) Update D network:
        ###########################
        D_real = netD(X)
        D_fake = netD_fake(G_indep_sample, netD)
        D_loss = compute_dloss(D_real, D_fake, label)
        D_exp.add_scalar_value('D_loss',
                               D_loss.data[0],
                               step=batch_idx + it * train_size)
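        # keep the graph alive for the generator update below
        # (retain_variables is the old PyTorch name for retain_graph)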
        D_loss.backward(retain_variables=True)
        D_solver.step()

        ############################
        # (2) Update G network:
        ###########################
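        # re-score the fake samples, then backpropagate and step the shared
        # and independent generator parameters via the helper routines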
        D_fake = netD_fake(G_indep_sample, netD)
        G_losses, index = compute_gloss(D_fake, label)
        mutil_backward(G_losses, netG_share, netG_indeps, index)
        mutil_steps(G_losses, G_share_solver, G_indep_solver, index)
        add2experiments(G_losses, G_exps, step=batch_idx + it * train_size)
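        # refresh the buffers for the next update: copy the batch into X again,
        # draw a fresh z and resize the label buffer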
        mb_size = data.size(0)
        X.data.resize_(data.size()).copy_(data)
        X.data.resize_(mb_size, x_dim)
        z.data.resize_(mb_size, z_dim).normal_(0, 1)
        label.data.resize_(mb_size)
        cnt = batch_idx + it * train_size

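        # reversed generator path: the independent generators consume z directly,
        # and the shared network maps their outputs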
        G_indep_sample = create_netG_indeps_sample(netG_indeps, z)
        G_share_sample = create_netG_share_sample(netG_share, G_indep_sample)

        ############################
        # (1) Update D network: vanilla GAN loss
        ###########################
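        # when netD_continue_trian is set, skip the D update and only log D's scores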
        if not netD_continue_trian:
            D_real = netD(X)
            D_fake = netD_fake(G_share_sample, netD)
            D_loss = compute_dloss(D_real, D_fake, label)
            D_exp.add_scalar_value('D_loss', D_loss.data[0], step=cnt)
            D_loss.backward(retain_variables=True)
            D_solver.step()
        else:
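            # monitoring only: print the discriminator's scores on real and fake data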
            D_real = netD(X).data.numpy()[0][0]
            D_fake = netD_fake(G_share_sample, netD)
            D_fake = compute_mean(D_fake, 10.0).data.numpy()[0]
            print('D_real:{}----/D_fake:{}'.format(D_real, D_fake))

        ############################
        # (2) Update G network:
        ###########################
        D_fake = netD_fake(G_share_sample, netD)
        G_losses, index = compute_gloss(D_fake, label)