def main():
    """Parse command-line arguments and run DCGAN training on CelebA.

    Builds the input pipeline from ``--dataset_dir``, constructs a
    ``Trainer`` with the parsed hyper-parameters, and runs its training
    loop for ``--epochs`` epochs. Progress images and checkpoints are
    written to the (validated) ``--progress_dir`` / ``--checkpoint_dir``.
    """
    parser = argparse.ArgumentParser(description="Training DCGAN on CelebA dataset")
    parser.add_argument("--checkpoint_dir", type=str, default="./model/checkpoint",
                        help="Path to write checkpoint")
    parser.add_argument("--progress_dir", type=str, default="./data/face_gan",
                        help="Path to write training progress image")
    parser.add_argument("--dataset_dir", type=str, required=True,
                        help="Path to dataset")
    parser.add_argument("--latent_dim", type=int, default=100,
                        help="Latent space dimension")
    parser.add_argument("--test_size", type=int, default=4,
                        help="Square root number of test images to control training progress")
    # BUGFIX: help text previously read "Number of training steps per epoch",
    # which describes steps, not the batch size this flag actually controls
    # (it is forwarded to datagen.build(batch_size=...) below).
    parser.add_argument("--batch_size", type=int, default=100,
                        help="Number of samples per training batch")
    parser.add_argument("--lr", type=float, default=0.0002,
                        help="Learning rate")
    parser.add_argument("--epochs", type=int, default=20,
                        help="Number of epochs for training")
    args = vars(parser.parse_args())

    # Ensure output directories exist / are writable before any training work.
    validate_path(args["checkpoint_dir"])
    validate_path(args["progress_dir"])

    # Input pipeline; total_steps is the number of batches per epoch.
    datagen = DataSet(args["dataset_dir"])
    dataset, total_steps = datagen.build(batch_size=args["batch_size"])

    DCGAN = Trainer(progress_dir=args["progress_dir"],
                    checkpoint_dir=args["checkpoint_dir"],
                    z_dim=args["latent_dim"],
                    test_size=args["test_size"],
                    batch_size=args["batch_size"],
                    learning_rate=args["lr"])
    DCGAN.train_loop(dataset=dataset,
                     epochs=args["epochs"],
                     total_steps=total_steps)
def main():
    """Parse command-line arguments and run VAE / DFC-VAE training on CelebA.

    ``--model`` selects between the plain VAE trainer and a Deep Feature
    Consistent VAE trainer (``VAE_123`` / ``VAE_345`` pick which VGG19
    layers drive the perceptual loss). Shared hyper-parameters come from
    ``cfg``; trainer-specific paths/weights from ``VaeConfig`` /
    ``DfcVaeConfig``.
    """
    parser = argparse.ArgumentParser(
        description="Training VAE on CelebA dataset")
    parser.add_argument("--model", type=str, default="VAE",
                        choices=["VAE", "VAE_123", "VAE_345"],
                        help="Training model")
    args = vars(parser.parse_args())

    # Input pipeline; total_steps is the number of batches per epoch.
    datagen = DataSet(cfg.dataset_dir)
    dataset, total_steps = datagen.build(batch_size=cfg.batch_size)

    encoder, decoder, vae_net = build_vae(z_dim=cfg.z_dim)

    if args["model"] == "VAE":
        validate_path(VaeConfig.progress_dir)
        validate_path(VaeConfig.checkpoint_dir)
        # BUGFIX: batch_size was previously passed cfg.test_size (copy-paste
        # from the test_size kwarg), which disagreed with the batch size the
        # dataset pipeline was actually built with above.
        VAE = VaeTrainer(progress_dir=VaeConfig.progress_dir,
                         checkpoint_dir=VaeConfig.checkpoint_dir,
                         encoder=encoder,
                         decoder=decoder,
                         vae_net=vae_net,
                         reconstruction_weight=VaeConfig.reconstruction_weight,
                         z_dim=cfg.z_dim,
                         test_size=cfg.test_size,
                         batch_size=cfg.batch_size,
                         learning_rate=cfg.lr)
    else:
        validate_path(DfcVaeConfig.progress_dir)
        validate_path(DfcVaeConfig.checkpoint_dir)
        # Same batch_size fix applied to the DFC-VAE branch.
        VAE = DfcVaeTrainer(
            progress_dir=DfcVaeConfig.progress_dir,
            checkpoint_dir=DfcVaeConfig.checkpoint_dir,
            encoder=encoder,
            decoder=decoder,
            vae_net=vae_net,
            vgg_layers=DfcVaeConfig.vgg19_layers[args["model"]],
            perceptual_weight=DfcVaeConfig.perceptual_weight,
            z_dim=cfg.z_dim,
            test_size=cfg.test_size,
            batch_size=cfg.batch_size,
            learning_rate=cfg.lr)

    VAE.train_loop(dataset=dataset,
                   epochs=cfg.epochs,
                   total_steps=total_steps)