    # (reconstructed) The start of this example is truncated; the loader is
    # rebuilt here on the assumption that it endlessly cycles over the
    # dataset, since next(loader) is called across epochs below
    # (`cycle` would come from itertools).
    loader = cycle(
        torch.utils.data.DataLoader(dataset,
                                    batch_size=BATCH_SIZE,
                                    shuffle=True,
                                    drop_last=True))

    encoder = Encoder()
    encoder.apply(weights_init)

    decoder = Decoder()
    decoder.apply(weights_init)

    # load the pretrained autoencoder weights (map_location keeps this
    # working on CPU-only machines; weights move to the GPU below if CUDA)
    encoder.load_state_dict(
        torch.load(os.path.join('checkpoints', ENCODER_SAVE),
                   map_location='cpu'))
    decoder.load_state_dict(
        torch.load(os.path.join('checkpoints', DECODER_SAVE),
                   map_location='cpu'))

    # put the pretrained autoencoder in eval mode; only prediction_model's
    # parameters are optimized below
    encoder.eval()
    decoder.eval()

    prediction_model = Prediction_Model()
    prediction_model.apply(weights_init)

    if CUDA:
        encoder.cuda()
        decoder.cuda()
        prediction_model.cuda()

    optimizer = torch.optim.Adam(prediction_model.parameters(),
                                 lr=LR,
                                 betas=(BETA1, BETA2))
    mse_loss = nn.MSELoss()

    # number of frames that the prediction network will take as input
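    # The example is cut off at this point. Below is a hedged sketch of how
    # the pieces above would plausibly be used -- the constant name
    # NUM_INPUT_FRAMES and the loop body are assumptions, not the source:
    # encode each batch with the frozen encoder, feed the first
    # NUM_INPUT_FRAMES feature steps to prediction_model, and regress the
    # remaining steps with mse_loss.
    NUM_INPUT_FRAMES = 4  # hypothetical value

    for epoch in range(START_EPOCH, END_EPOCH):
        for _ in range(len(dataset) // BATCH_SIZE):
            X_in = next(loader).float()
            if CUDA:
                X_in = X_in.cuda()

            with torch.no_grad():              # encoder stays frozen
                features, _, _, _ = encoder(X_in)

            pred = prediction_model(features[:, :NUM_INPUT_FRAMES])
            loss = mse_loss(pred, features[:, NUM_INPUT_FRAMES:])

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
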
Example #2

    # (reconstructed) The top of this example is truncated; the loader is
    # rebuilt under the same cycling-iterator assumption as above.
    loader = cycle(
        torch.utils.data.DataLoader(dataset,
                                    batch_size=BATCH_SIZE,
                                    shuffle=True,
                                    drop_last=True))

    # initialize summary writer
    writer = SummaryWriter()

    sigma_p_inv, det_p = setup_pz(NUM_FEA, FEA_DIM, FEA)
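    # Sketch: judging by its return values, setup_pz builds the Gaussian
    # prior p(z) -- sigma_p_inv being the prior's inverse covariance and
    # det_p its determinant -- for NUM_FEA latent factors of dimension
    # FEA_DIM (FEA presumably selects the prior type). A minimal stand-in,
    # an assumption rather than the repository's implementation:
    #
    #     def setup_pz(num_fea, fea_dim, fea):
    #         sigma_p = torch.eye(num_fea * fea_dim)  # standard-normal prior
    #         return torch.inverse(sigma_p), torch.det(sigma_p)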

    # create copies of the encoder and decoder for style-transfer
    # visualization during training
    encoder_test = Encoder()
    encoder_test.apply(weights_init)

    decoder_test = Decoder()
    decoder_test.apply(weights_init)

    encoder_test.eval()
    decoder_test.eval()

    if CUDA:
        encoder_test.cuda()
        decoder_test.cuda()

    lowest_loss = float('inf')

    for epoch in range(START_EPOCH, END_EPOCH):
        epoch_loss = 0
        for iteration in range(len(dataset) // BATCH_SIZE):

            # load a batch of videos
            X_in = next(loader).float()
            if CUDA:
                X_in = X_in.cuda()

            # flatten each video in the batch into a single row
            Y_flat = X_in.view(X_in.size(0), -1)
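            # The example is truncated here; the rest of this loop is a hedged
            # sketch, not the source. encoder, decoder, optimizer, and mse_loss
            # are assumed to be defined in the truncated top of the example,
            # and sigma_p_inv/det_p from setup_pz presumably enter the KL term.
            X, KL, muL, det_q = encoder(X_in)
            dec = decoder(X)
            recon_loss = mse_loss(dec.view(dec.size(0), -1), Y_flat)
            loss = recon_loss + KL

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()
            writer.add_scalar('train/loss', loss.item(),
                              epoch * (len(dataset) // BATCH_SIZE) + iteration)

        # track the best epoch so far (sketch; checkpoint saving omitted)
        if epoch_loss < lowest_loss:
            lowest_loss = epoch_loss
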
Example #3

    # (reconstructed) The header and loader setup of this fragment were lost
    # in extraction; both are assumptions -- it re-creates and reloads the
    # encoder/decoder, so it reads as a separate example.
    loader = cycle(
        torch.utils.data.DataLoader(dataset,
                                    batch_size=BATCH_SIZE,
                                    shuffle=True,
                                    drop_last=True))

    encoder = Encoder()
    encoder.apply(weights_init)

    decoder = Decoder()
    decoder.apply(weights_init)

    encoder.load_state_dict(
        torch.load(os.path.join('checkpoints', ENCODER_SAVE),
                   map_location='cpu'))
    decoder.load_state_dict(
        torch.load(os.path.join('checkpoints', DECODER_SAVE),
                   map_location='cpu'))

    encoder.eval().cuda()
    decoder.eval().cuda()

    # take the first video from each of two batches, keeping a batch dim
    video1 = next(loader).float().cuda()[0].unsqueeze(0)
    video2 = next(loader).float().cuda()[0].unsqueeze(0)

    # encode both videos: latent features plus the KL term and posterior
    # statistics (muL, det_q)
    X1, KL1, muL1, det_q1 = encoder(video1)
    X2, KL2, muL2, det_q2 = encoder(video2)

    # save reconstructed frames (create the output directory first so
    # save_image does not fail on a missing path)
    os.makedirs('./results/style_transfer_results', exist_ok=True)

    dec_v1 = decoder(X1)
    save_image(dec_v1.squeeze(0).transpose(2, 3),
               './results/style_transfer_results/recon_v1.png',
               nrow=NUM_FRAMES,
               normalize=True)

    dec_v2 = decoder(X2)
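    # The example stops here; what follows is a hedged sketch of the natural
    # continuation, not the source: save the second reconstruction, then swap
    # the first latent factor (assumed to occupy the first FEA_DIM channels
    # of the feature layout) between the two videos and decode the result.
    save_image(dec_v2.squeeze(0).transpose(2, 3),
               './results/style_transfer_results/recon_v2.png',
               nrow=NUM_FRAMES,
               normalize=True)

    X1_swap = X1.clone()
    X2_swap = X2.clone()
    X1_swap[..., :FEA_DIM], X2_swap[..., :FEA_DIM] = \
        X2[..., :FEA_DIM], X1[..., :FEA_DIM]

    save_image(decoder(X1_swap).squeeze(0).transpose(2, 3),
               './results/style_transfer_results/swap_v1.png',
               nrow=NUM_FRAMES,
               normalize=True)
    save_image(decoder(X2_swap).squeeze(0).transpose(2, 3),
               './results/style_transfer_results/swap_v2.png',
               nrow=NUM_FRAMES,
               normalize=True)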