import torch
import torch.optim as optim
from torchvision import transforms

# dataset_constructor, ImgToTensor, appearance_autoencoder, and ae_trainer are
# assumed to come from this repo's own modules.
def train_ae_appearance_model(exp_config, learning_rate, num_epochs, batch_size, cuda_avail, loss_function, face_images_train_warped):
    # Hold out the last 100 training images as a validation split.
    face_train_split = face_images_train_warped[:-100]
    face_val_split = face_images_train_warped[-100:]
    face_trainset = dataset_constructor(face_train_split, transform=transforms.Compose([ImgToTensor()]))
    face_valset = dataset_constructor(face_val_split, transform=transforms.Compose([ImgToTensor()]))

    # Shuffle the training set each epoch; keep the validation order fixed.
    face_trainloader = torch.utils.data.DataLoader(face_trainset,
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   num_workers=2)

    face_valloader = torch.utils.data.DataLoader(face_valset,
                                                 batch_size=batch_size,
                                                 shuffle=False,
                                                 num_workers=2)

    app_model = appearance_autoencoder(latent_dim_size=50)
    optimizer = optim.Adam(app_model.parameters(), lr=learning_rate)
    trainer = ae_trainer(optimizer=optimizer,
                         use_cuda=cuda_avail,
                         model=app_model,
                         loss_func=loss_function,
                         model_name="Appearance-AE",
                         exp_config=exp_config)
    
    trainer.train_model(num_epochs, face_trainloader, face_valloader)
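
# A minimal invocation sketch for the trainer above. The values below are
# hypothetical; the loss and exp_config objects are assumptions, and the data
# file matches the one loaded by the snippets further down:
#
#   all_imgs = np.load('all-warped-images.npy')
#   train_ae_appearance_model(exp_config=exp_config,
#                             learning_rate=1e-3,
#                             num_epochs=20,
#                             batch_size=32,
#                             cuda_avail=torch.cuda.is_available(),
#                             loss_function=torch.nn.MSELoss(),
#                             face_images_train_warped=all_imgs[:-100])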
Example #2
import argparse

import numpy as np
import torch

# appearance_autoencoder and appearance_VAE are assumed to be importable from
# this repo's model definitions; the --weights flag is inferred from the
# args.weights use below, so its exact help text is an assumption.
parser = argparse.ArgumentParser()
parser.add_argument('--model',
                    required=True,
                    help="model type, either 'ae' or 'vae'")
parser.add_argument('--weights',
                    required=True,
                    help="path to saved model weights")
parser.add_argument('--num_imgs',
                    type=int,
                    default=5,
                    help="number of random images to sample and reconstruct.")
args = parser.parse_args()

# read images
all_face_images_warped = np.load('all-warped-images.npy')
face_images_train_warped = all_face_images_warped[:-100]
face_images_test_warped = all_face_images_warped[-100:]

# get model and weights
if args.model == 'ae':
    app_model = appearance_autoencoder(latent_dim_size=50)
elif args.model == 'vae':
    app_model = appearance_VAE(latent_dim_size=50)
else:
    raise SystemExit("Model {} not recognized. Please use 'ae' or 'vae' only.".format(
        args.model))
# Load the saved weights onto CPU regardless of the device they were saved from.
app_model.load_state_dict(
    torch.load(args.weights, map_location=lambda storage, loc: storage))

# get original images
num_imgs = args.num_imgs
img_inds = np.random.choice(np.arange(len(all_face_images_warped)),
                            size=num_imgs,
                            replace=False)
sample_imgs = np.copy(all_face_images_warped[img_inds])
sample_img_tensors = []
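
# A minimal sketch of the reconstruction step this snippet leads into.
# ImgToTensor's behavior and the model's forward signature are assumptions
# (a VAE forward may return extra terms such as mu and logvar):
#
#   app_model.eval()
#   with torch.no_grad():
#       for img in sample_imgs:
#           sample_img_tensors.append(ImgToTensor()(img).unsqueeze(0))
#       reconstructions = [app_model(t) for t in sample_img_tensors]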
Example #3
    help="type of faces data to reconstruct from, choose from 'aligned' or 'unaligned'")

args = parser.parse_args()

# read images
if args.faces == 'aligned':
    all_face_images = np.load('all-warped-images.npy')
elif args.faces == 'unaligned':
    all_face_images = np.load('all-raw-images.npy')
else:
    raise SystemExit("Faces type {} not recognized. Please use 'aligned' or 'unaligned' only.".format(args.faces))
face_images_train = all_face_images[:-100]
face_images_test = all_face_images[-100:]

# get model and weights
if args.model == 'ae':
    app_model = appearance_autoencoder(latent_dim_size=args.appear_latent_dim)
elif args.model == 'vae':
    app_model = appearance_VAE(latent_dim_size=args.appear_latent_dim)
else:
    raise SystemExit("Model {} not recognized. Please use 'ae' or 'vae' only.".format(args.model))
# Load the saved weights onto CPU regardless of the device they were saved from.
app_model.load_state_dict(torch.load(args.weights, map_location=lambda storage, loc: storage))


# get original images
num_imgs = args.num_imgs
img_inds = np.random.choice(np.arange(len(all_face_images)), size=num_imgs, replace=False)
sample_imgs = np.copy(all_face_images[img_inds])

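# Example invocation (the script name is hypothetical; the flags follow the
# parser head reconstructed above):
#
#   python reconstruct_faces.py --model vae --faces aligned \
#       --weights appearance-vae.pt --num_imgs 5 --appear_latent_dim 50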