Example #1
    # NOTE: the snippet is truncated at the top; the loader construction below is
    # reconstructed, and the endless cycle() wrapper is an assumption, made because
    # the training loop further down draws batches with next(loader)
    loader = cycle(
        DataLoader(dataset,
                   batch_size=BATCH_SIZE,
                   shuffle=True,
                   drop_last=True))

    # initialize summary writer
    writer = SummaryWriter()

    sigma_p_inv, det_p = setup_pz(NUM_FEA, FEA_DIM, FEA)

    # creating copies of encoder-decoder objects for style transfer visualization during training
    encoder_test = Encoder()
    encoder_test.apply(weights_init)

    decoder_test = Decoder()
    decoder_test.apply(weights_init)

    encoder_test.eval()
    decoder_test.eval()

    if (CUDA):
        encoder_test.cuda()
        decoder_test.cuda()

    lowest_loss = float('inf')

    for epoch in range(START_EPOCH, END_EPOCH):
        epoch_loss = 0
        for iteration in range(len(dataset) // BATCH_SIZE):

            # load a batch of videos
            X_in = next(loader).float().cuda()
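
Example #1 pulls batches with next(loader), so the DataLoader has to be wrapped in an iterator that never runs out. A minimal sketch of such a wrapper (the cycle name and the generator approach are assumptions, not the repository's exact helper):

from torch.utils.data import DataLoader

def cycle(iterable):
    # Re-iterate forever so next(loader) never raises StopIteration.
    # Unlike itertools.cycle, nothing is cached, so every pass over the
    # DataLoader reshuffles the data when shuffle=True.
    while True:
        for item in iterable:
            yield item

# usage with the names from the example above:
# loader = cycle(DataLoader(dataset, batch_size=BATCH_SIZE,
#                           shuffle=True, drop_last=True))
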
Example #2

    # NOTE: the snippet is truncated at the top; the loader construction is
    # reconstructed, with the cycle() wrapper assumed because the two test
    # videos below are drawn with next(loader)
    loader = cycle(
        DataLoader(dataset,
                   batch_size=BATCH_SIZE,
                   shuffle=True,
                   drop_last=True))

    encoder = Encoder()
    encoder.apply(weights_init)

    decoder = Decoder()
    decoder.apply(weights_init)

    encoder.load_state_dict(
        torch.load(os.path.join('checkpoints/', ENCODER_SAVE)))
    decoder.load_state_dict(
        torch.load(os.path.join('checkpoints/', DECODER_SAVE)))

    encoder.eval().cuda()
    decoder.eval().cuda()

    video1 = next(loader).float().cuda()[0].unsqueeze(0)
    video2 = next(loader).float().cuda()[0].unsqueeze(0)

    X1, KL1, muL1, det_q1 = encoder(video1)
    X2, KL2, muL2, det_q2 = encoder(video2)

    # save reconstructed images
    dec_v1 = decoder(X1)
    save_image(dec_v1.squeeze(0).transpose(2, 3),
               './results/style_transfer_results/recon_v1.png',
               nrow=NUM_FRAMES,
               normalize=True)
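
The snippet only writes out video1's reconstruction. The second reconstruction, and a style-transfer check built by swapping part of the two latents, follow the same call pattern; the block below reuses the example's names, and the assumption that the latent's last dimension is laid out as NUM_FEA blocks of FEA_DIM channels is an illustration, not the repository's documented layout.

# the second video's reconstruction, mirroring the call above
dec_v2 = decoder(X2)
save_image(dec_v2.squeeze(0).transpose(2, 3),
           './results/style_transfer_results/recon_v2.png',
           nrow=NUM_FRAMES,
           normalize=True)

# hypothetical style transfer: overwrite one assumed FEA_DIM-wide feature block
# of video1's latent with video2's before decoding
X_swap = X1.clone()
X_swap[..., :FEA_DIM] = X2[..., :FEA_DIM]
dec_swap = decoder(X_swap)
save_image(dec_swap.squeeze(0).transpose(2, 3),
           './results/style_transfer_results/swap_v1_v2.png',
           nrow=NUM_FRAMES,
           normalize=True)
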
Example #3

    # NOTE: the snippet is truncated at the top; the loader construction is
    # reconstructed the same way as in the examples above (the cycle()
    # wrapper and the dataset/loader names are assumptions)
    loader = cycle(
        DataLoader(dataset,
                   batch_size=BATCH_SIZE,
                   shuffle=True,
                   drop_last=True))

    encoder = Encoder()
    encoder.apply(weights_init)

    decoder = Decoder()
    decoder.apply(weights_init)

    encoder.load_state_dict(
        torch.load(os.path.join('checkpoints', ENCODER_SAVE)))
    decoder.load_state_dict(
        torch.load(os.path.join('checkpoints', DECODER_SAVE)))

    encoder.eval()
    decoder.eval()

    prediction_model = Prediction_Model()
    prediction_model.apply(weights_init)

    if (CUDA):
        encoder.cuda()
        decoder.cuda()
        prediction_model.cuda()

    optimizer = torch.optim.Adam(list(prediction_model.parameters()),
                                 lr=LR,
                                 betas=(BETA1, BETA2))
    mse_loss = nn.MSELoss()
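
Example #3 stops right after the optimizer and MSE loss are constructed. A training step built from these pieces might look like the sketch below; it reuses the example's names, and the objective (predicting the last frame's latent from the earlier frames') is a hypothetical illustration, not the repository's documented target.

for epoch in range(START_EPOCH, END_EPOCH):
    for iteration in range(len(dataset) // BATCH_SIZE):
        X_in = next(loader).float().cuda()

        # the encoder is frozen (eval mode, not in the optimizer), so skip its gradients
        with torch.no_grad():
            X, KL, muL, det_q = encoder(X_in)

        # hypothetical objective: predict the final frame's latent from the rest
        pred = prediction_model(X[:, :-1])
        loss = mse_loss(pred, X[:, -1])

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
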
Example #4
                # NOTE: the snippet starts mid-statement; the opening of this
                # scalar-logging call is reconstructed, with 'g_loss' assumed
                # as the first key of the dict
                add_scalar_dict(
                    writer, {
                        'g_loss':
                        g_loss.item(),
                        'gf_loss':
                        gf_loss.item(),
                        'fm_loss':
                        fm_loss.item(),
                        'vgg_loss':
                        vgg_loss.item()
                        if type(vgg_loss) is torch.Tensor else vgg_loss,
                        'kl_loss':
                        kl_loss.item()
                    }, it, 'G')
                add_scalar_dict(writer, {
                    'lr_G': decayed_lr_G,
                    'lr_D': decayed_lr_D
                }, it, 'LR')
                E.eval()
                G.eval()
                with torch.no_grad():
                    mu, logvar = E(fixed_reals)
                    latents = sample_latent(mu, logvar)
                    samples = G(latents, fixed_annos_onehot)
                    vutils.save_image(
                        samples,
                        join(sample_path,
                             '{:03d}_{:07d}_fake.jpg'.format(ep, it)),
                        nrow=4,
                        padding=0,
                        normalize=True,
                        range=(-1., 1.))
            it += 1
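
add_scalar_dict in this example is a project-level helper rather than part of torch. A minimal sketch consistent with how it is called above (a writer, a dict of scalar values, the global step it, and a tag prefix such as 'G' or 'LR'), assuming writer is a tensorboard SummaryWriter:

from torch.utils.tensorboard import SummaryWriter

def add_scalar_dict(writer, scalar_dict, it, tag=None):
    # log every entry under an optional tag prefix, e.g. 'G/g_loss' or
    # 'LR/lr_G', all at global step `it`
    for name, value in scalar_dict.items():
        full_name = '{}/{}'.format(tag, name) if tag else name
        writer.add_scalar(full_name, value, global_step=it)
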