# Make every parameter of the generator and both discriminators trainable
for params in generator_model.parameters():
    params.requires_grad = True
for params in discriminator_model_pose.parameters():
    params.requires_grad = True
for params in discriminator_model_conf.parameters():
    params.requires_grad = True
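
# Hypothetical helper (not in the original script): alternating GAN updates
# often flip requires_grad off for the discriminators during the generator
# step, which a small toggle like this makes convenient.
def set_requires_grad(model, flag):
    """Enable or disable gradients for every parameter of `model`."""
    for p in model.parameters():
        p.requires_grad = flag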

# Wrap the models in DataParallel so batches are split across available GPUs
generator_model = nn.DataParallel(generator_model)
discriminator_model_conf = nn.DataParallel(discriminator_model_conf)
discriminator_model_pose = nn.DataParallel(discriminator_model_pose)
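
# Checkpointing sketch (an assumption, not part of the original script):
# DataParallel keeps the real network on `.module`, so saving the unwrapped
# state_dict keeps checkpoints loadable without the DataParallel wrapper.
def save_unwrapped(model, path):
    torch.save(model.module.state_dict(), path)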

# Datasets
# Datasets: build the train split, then flip args.mode to 'val' so the same
# constructor yields the validation split
if args.dataset == 'lsp':
    lsp_train_dataset = LSP(args)
    args.mode = 'val'
    lsp_val_dataset = LSP(args)
# medical
elif args.dataset == 'medical':
    lsp_train_dataset = HANDXRAY(args)
    args.mode = 'val'
    lsp_val_dataset = HANDXRAY(args)
# MPII
elif args.dataset == 'mpii':
    lsp_train_dataset = MPII('train')
    lsp_val_dataset = MPII('valid')
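# Defensive guard (an addition, not in the original script): fail fast on an
# unrecognized dataset name instead of hitting a NameError further down
else:
    raise ValueError('Unknown dataset: {}'.format(args.dataset))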

# Dataset and the DataLoader
train_loader = torch.utils.data.DataLoader(lsp_train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True)

    # Rebuild the discriminator with the architecture hyperparameters from
    # the config (the opening of this call was lost in the source; the class
    # name `Discriminator` here is an assumption)
    discriminator_model = Discriminator(
        config['dataset']['num_joints'],
        config['discriminator']['num_residuals'])

    # Load the generator from the checkpoint given on the command line
    model_data = torch.load(args.modelName)
    generator_model = model_data['generator_model']

    # Wrapping in DataParallel and immediately taking .module just recovers
    # the plain modules, so the models end up unwrapped (single-GPU) here
    generator_model = nn.DataParallel(generator_model).module
    discriminator_model = nn.DataParallel(discriminator_model).module

    # Dataset and the Dataloader (note: validation runs on MPII here while
    # training uses LSP; the commented-out LSP(args) is the symmetric choice)
    lsp_train_dataset = LSP(args)
    args.mode = 'val'
    lsp_val_dataset = MPII('val')  # LSP(args)
    train_loader = torch.utils.data.DataLoader(lsp_train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True)
    val_save_loader = torch.utils.data.DataLoader(lsp_val_dataset,
                                                  batch_size=args.batch_size)  # unshuffled: fixed order for saving outputs
    val_eval_loader = torch.utils.data.DataLoader(lsp_val_dataset,
                                                  batch_size=args.batch_size,
                                                  shuffle=True)  # shuffled copy for evaluation passes
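
    # Optional tuning (a suggestion, not in the original): worker processes
    # and pinned memory usually speed up host-to-GPU batch transfer, e.g.
    # DataLoader(..., num_workers=4, pin_memory=True).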

    # Move the models to the GPU, if available
    if args.use_gpu:
        generator_model = generator_model.to(fast_device)
        discriminator_model = discriminator_model.to(fast_device)
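
    # `fast_device` is assumed to be defined earlier in the script; a typical
    # definition (an assumption, not taken from the original) would be:
    # fast_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')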