Example #1
0
def load_model(filename):
    """Restore the generator/discriminator pair from a saved checkpoint.

    Rebuilds both networks from the module-level ``data`` configuration,
    loads their weights from *filename*, and switches both to evaluation
    mode.  The restored models are published via the module globals
    ``generator`` and ``discriminator``.

    NOTE(review): assumes ``data``, ``device``, ``latent_size``,
    ``Generator`` and ``Discriminator`` exist at module level — confirm
    against the rest of the file.
    """
    global generator, discriminator

    checkpoint = torch.load(filename, map_location=device)

    # Vocabulary size is shared by both networks.
    vocab_size = len(data['mapping'])

    generator = Generator(vocab_size, data['max_length'], latent_size)
    generator.load_state_dict(checkpoint['generator_model_state_dict'])
    generator.eval()

    discriminator = Discriminator(vocab_size)
    discriminator.load_state_dict(checkpoint['discriminator_model_state_dict'])
    discriminator.eval()

    print('Loaded model')
Example #2
0
        g_optimizer.step()
        g_train_loss = g_loss.item()

        ### --------------  END GENERATOR STEP ------------------------
        # Record the per-batch discriminator Wasserstein loss, gradient-penalty
        # loss and generator loss for later inspection.
        losses_train.append((d_train_wloss, d_train_gploss, g_train_loss))

        # Periodic progress report.
        if i_batch % print_every == 0:
            print('Epoch [{:5d}/{:5d}] | d_loss: {:6.4f} | g_loss: {:6.4f}'.
                  format(i_epoch + 1, num_epochs, d_loss.item(),
                         g_loss.item()))

    # ----- End training epoch ------

    # ----------- Validation Loop --------------
    # Switch both networks to eval mode (affects dropout/batch-norm layers).
    G.eval()
    D.eval()
    for i_batch in range(N_val_btot):

        # Draw a random validation batch: real waveforms (numpy) plus a list
        # of conditioning arrays i_vc.
        (data_b, i_vc) = sdat_val.get_rand_batch()
        real_wfs = torch.from_numpy(data_b).float()
        i_vc = [torch.from_numpy(i_v).float() for i_v in i_vc]
        Nsamp = real_wfs.size(0)
        # Move inputs to the GPU when available.
        if cuda:
            real_wfs = real_wfs.cuda()
            i_vc = [i_v.cuda() for i_v in i_vc]

        # Sample latent noise and generate fakes conditioned on i_vc.
        # NOTE(review): noise is sized with Nbatch, not Nsamp — if the real
        # batch can be smaller than Nbatch, the fake/real batch sizes differ
        # here; confirm this is intended.
        z = noise(Nbatch, z_size)
        z = torch.from_numpy(z).float()
        if cuda:
            z = z.cuda()
        fake_wfs = G(z, *i_vc)
Example #3
0
        netG.zero_grad()
        # Generator step: push D's output on fakes toward the "valid" label.
        out_fake = torch.sigmoid(netD(x_fake))
        lossG = criterion(out_fake, valid)
        lossG.backward()
        optimizerG.step()

        # Print training stats
        if i % n_print == 0:
            print(
                "[Epoch {:5}/{:5}] [Batch {:3}/{:3}] [D loss: {:2.6f}] [G loss: {:2.6f}]"
                .format(epoch, n_epochs, i, len(loader['train']),
                        fake_lossD.item() + real_lossD.item(), lossG.item()))

    # Validation
    # Eval mode disables dropout / freezes batch-norm statistics.
    # NOTE(review): this loop is not wrapped in torch.no_grad(), so autograd
    # still tracks these forward passes — consider adding it to save memory.
    netD.eval()
    netG.eval()
    valLossD, val_size = 0, 0
    for data in loader['val']:
        x_real = data['moves'].type(Tensor)

        # Target labels: 1 for real samples, 0 for generated ones.
        valid = Tensor(x_real.size(0), 1).fill_(1)
        fake = Tensor(x_real.size(0), 1).fill_(0)

        # Generate a matching-size batch of fakes from latent noise.
        z = torch.randn(x_real.size(0), latent_size, 1, 1, device=device)
        x_fake = netG(z)

        out_real = torch.sigmoid(netD(x_real))
        out_fake = torch.sigmoid(netD(x_fake))

        valReal_lossD = criterion(out_real, valid)