Example #1
    def __init__(self, config, root, model, *args, **kwargs):
        super().__init__(config, root, model, *args, **kwargs)
        self.logger = get_logger("Iterator")
        assert config["model"] in [
            "model.vae_gan.VAE_GAN", "model.vae_gan.VAE_WGAN"
        ], "This Iterator only supports the VAE GAN models: VAE_GAN and VAE_WGAN."
        # export to the right gpu if specified in the config
        self.device = set_gpu(config)
        self.logger.debug(f"Model will pushed to the device: {self.device}")
        # get the config and the logger
        self.config = config
        set_random_state(random_seed=self.config["random_seed"])
        self.batch_size = config['batch_size']
        # The config is validated inside the Model class, even for the keys the iterator uses
        # Log the architecture of the model
        self.logger.debug(f"{model}")
        self.model = model.to(self.device)

        self.optimizer_G = torch.optim.Adam(self.model.netG.parameters(),
                                            lr=self.config["learning_rate"])
        D_lr_factor = self.config["optimization"].get("D_lr_factor", 1)
        self.optimizer_D = torch.optim.Adam(self.model.netD.parameters(),
                                            lr=D_lr_factor *
                                            self.config["learning_rate"])

        self.real_labels = torch.ones(self.batch_size, device=self.device)
        self.fake_labels = torch.zeros(self.batch_size, device=self.device)
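This iterator only touches a handful of config keys. A hypothetical, minimal config sketch inferred from the lookups above (placeholder values, not the project's actual schema):

# Hypothetical config sketch for Example #1; only keys read in __init__ are shown.
config = {
    "model": "model.vae_gan.VAE_GAN",    # or "model.vae_gan.VAE_WGAN"
    "random_seed": 42,                   # placeholder value
    "batch_size": 64,                    # placeholder value
    "learning_rate": 2e-4,               # placeholder value
    "optimization": {"D_lr_factor": 1},  # optional; defaults to 1 when missing
}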
Example #2
    def __init__(self, config, root, model, *args, **kwargs):
        super().__init__(config, root, model, *args, **kwargs)
        assert config["model"] == "model.gan.DCGAN", \
            "This iterator only supports the model: model.gan.DCGAN"
        self.logger = get_logger("Iterator")
        # export to the right gpu if specified in the config
        self.device = set_gpu(config)
        self.logger.debug(f"Model will pushed to the device: {self.device}")
        # get the config and the logger
        self.config = config
        set_random_state(self.config["random_seed"])
        self.batch_size = config['batch_size']
        # Log the architecture of the model
        self.logger.debug(f"{model}")
        self.model = model.to(self.device)

        self.optimizer_G = torch.optim.Adam(self.model.netG.parameters(),
                                            lr=self.config["learning_rate"],
                                            betas=(.5, .999))
        D_lr_factor = self.config["optimization"].get("D_lr_factor", 1)
        self.optimizer_D = torch.optim.Adam(self.model.netD.parameters(),
                                            lr=D_lr_factor *
                                            self.config["learning_rate"],
                                            betas=(.5, .999))

        self.real_labels = torch.ones(self.batch_size, device=self.device)
        self.fake_labels = torch.zeros(self.batch_size, device=self.device)
        self.wasserstein = self.config["losses"]["adversarial_loss"] == "wasserstein"
Example #3
 def __init__(self, config, root, model, *args, **kwargs):
     """Initialise all important parameters of the iterator."""
     super().__init__(config, root, model, *args, **kwargs)
     assert config["model_type"] != "sketch2face", \
         "This iterator does not support sketch2face models; only single GAN models are supported."
     assert config["model"] == "model.vae_gan.VAE_WGAN", \
         "This iterator only supports the model: model.vae_gan.VAE_WGAN"
     # get the config and the logger
     self.config = config
     self.logger = get_logger("Iterator")
     set_random_state(self.config["random_seed"])
     # Check if cuda is available
     self.device = set_gpu(self.config)
     self.logger.debug(f"Model will pushed to the device: {self.device}")
     # Log the architecture of the model
     self.logger.debug(f"{model}")
     self.model = model.to(self.device)
     # save important constants
     self.learning_rate = self.config["learning_rate"]
     self.batch_size = self.config["batch_size"]
     self.critic_iter = self.config["losses"][
         "update_disc"] if "update_disc" in self.config["losses"] else 5
     # WGAN values from paper
     b1, b2 = 0.5, 0.999
     # use ADAM optimizer
     self.optimizer_G = torch.optim.Adam(self.model.netG.parameters(),
                                         lr=self.learning_rate,
                                         betas=(b1, b2))
     # check if there is a different learning rate for the discriminators
     D_lr_factor = self.config["optimization"].get("D_lr_factor", 1)
     self.optimizer_D = torch.optim.Adam(self.model.netD.parameters(),
                                         lr=self.learning_rate *
                                         D_lr_factor,
                                         betas=(b1, b2))
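critic_iter follows the WGAN recipe of updating the critic several times per generator update. A rough sketch of the loop this value usually drives, under the assumption that `real` is a data batch and `fake` a generator output (weight clipping or gradient penalty is omitted; the iterator's actual step logic lives elsewhere):

# Sketch only, not part of the source; `real` and `fake` are placeholders.
for _ in range(self.critic_iter):
    self.optimizer_D.zero_grad()
    critic_loss = self.model.netD(fake.detach()).mean() - self.model.netD(real).mean()
    critic_loss.backward()
    self.optimizer_D.step()
self.optimizer_G.zero_grad()
generator_loss = -self.model.netD(fake).mean()
generator_loss.backward()
self.optimizer_G.step()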
Example #4
    def __init__(self, config, root, model, *args, **kwargs):
        super().__init__(config, root, model, *args, **kwargs)
        assert config["model"] == "model.cycle_gan.Cycle_GAN", \
            "This CycleGAN iterator only works with the Cycle_GAN model."
        assert config["losses"]["adversarial_loss"] != "wasserstein", \
            "This CycleGAN iterator does not support a Wasserstein adversarial loss."
        self.logger = get_logger("Iterator")
        # export to the right gpu if specified in the config
        self.device = set_gpu(config)
        self.logger.debug(f"Model will pushed to the device: {self.device}")
        # get the config and the logger
        self.config = config
        set_random_state(self.config["random_seed"])
        self.batch_size = config['batch_size']
        # The config is validated inside the Model class, even for the keys the iterator uses
        # Log the architecture of the model
        self.logger.debug(f"{model}")
        self.model = model.to(self.device)
        # load pretrained models if specified in the config
        self.model, log_string = load_pretrained_vaes(config=self.config,
                                                      model=self.model)
        self.logger.debug(log_string)

        self.optimizer_G = torch.optim.Adam(
            itertools.chain(self.model.netG_A.parameters(),
                            self.model.netG_B.parameters()),
            lr=self.config["learning_rate"])  # betas=(opt.beta1, 0.999))
        D_lr_factor = self.config["optimization"].get("D_lr_factor", 1)
        self.optimizer_D_A = torch.optim.Adam(
            self.model.netD_A.parameters(),
            lr=D_lr_factor *
            self.config["learning_rate"])  # betas=(opt.beta1, 0.999))
        self.optimizer_D_B = torch.optim.Adam(
            self.model.netD_B.parameters(),
            lr=D_lr_factor *
            self.config["learning_rate"])  # betas=(opt.beta1, 0.999))

        self.add_latent_layer = self.config['variational'].get('num_latent_layer', 0) > 0
        self.only_latent_layer = bool(self.config['optimization'].get('only_latent_layer', False))
        if self.only_latent_layer:
            self.optimizer_Lin = torch.optim.Adam(
                itertools.chain(self.model.netG_A.latent_layer.parameters(),
                                self.model.netG_B.latent_layer.parameters()),
                lr=self.config["learning_rate"])
            self.logger.debug(
                "Only latent layers are optimized\nNumber of latent layers: {}"
                .format(self.config['variational']['num_latent_layer']))
        self.real_labels = torch.ones(self.batch_size, device=self.device)
        self.fake_labels = torch.zeros(self.batch_size, device=self.device)
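Several optional keys are pulled from the config at the end of this constructor. A hypothetical config fragment listing only the keys read above (values are placeholders; in particular the "vanilla" loss name is an assumption):

# Hypothetical config fragment for Example #4; only keys read in __init__ are listed.
config = {
    "model": "model.cycle_gan.Cycle_GAN",
    "losses": {"adversarial_loss": "vanilla"},  # anything except "wasserstein"
    "random_seed": 42,
    "batch_size": 16,
    "learning_rate": 2e-4,
    "optimization": {"D_lr_factor": 1, "only_latent_layer": False},
    "variational": {"num_latent_layer": 0},
}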
Example #5
 def __init__(self, config, root, model, *args, **kwargs):
     super().__init__(config, root, model, *args, **kwargs)
     self.logger = get_logger("Iterator")
     # export to the right gpu if specified in the config
     self.device = set_gpu(config)
     self.logger.debug(f"Model will pushed to the device: {self.device}")
     # get the config and the logger
     self.config = config
     set_random_state(random_seed=self.config["random_seed"])
     # The config is validated inside the Model class, even for the keys the iterator uses
     # Log the architecture of the model
     self.logger.debug(f"{model}")
     self.vae = model
     b1, b2 = 0.5, 0.999
     self.optimizer = torch.optim.Adam(self.vae.parameters(),
                                       lr=self.config["learning_rate"],
                                       betas=(b1, b2))
     self.vae.to(self.device)
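For context, a minimal training-step sketch built around the optimizer above, assuming the VAE returns a reconstruction together with mu and logvar and that an MSE reconstruction term is used (the iterator's real step is defined elsewhere):

# Sketch only, not part of the source; the VAE interface and loss terms are assumptions.
def vae_step(self, images):
    images = images.to(self.device)
    reconstruction, mu, logvar = self.vae(images)
    rec_loss = torch.nn.functional.mse_loss(reconstruction, images)
    kl_loss = -0.5 * torch.mean(1 + logvar - mu.pow(2) - logvar.exp())
    loss = rec_loss + kl_loss
    self.optimizer.zero_grad()
    loss.backward()
    self.optimizer.step()
    return loss.item()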
Example #6
 def __init__(self, config, root, model, *args, **kwargs):
     """Initialise all important parameters of the iterator."""
     super().__init__(config, root, model, *args, **kwargs)
     assert config["model"] == "model.cycle_gan.Cycle_WGAN", \
         "This iterator only supports the model: model.cycle_gan.Cycle_WGAN"
     # get the config and the logger
     self.config = config
     self.logger = get_logger("Iterator")
     set_random_state(self.config["random_seed"])
     # Check if cuda is available
     self.device = set_gpu(self.config)
     self.logger.debug(f"Model will pushed to the device: {self.device}")
     # Log the architecture of the model
     self.logger.debug(f"{model}")
     self.model = model.to(self.device)
     # save important constants
     self.learning_rate = self.config["learning_rate"]
     self.batch_size = self.config["batch_size"]
     self.critic_iter = self.config["losses"][
         "update_disc"] if "update_disc" in self.config["losses"] else 5
     # WGAN values from paper
     b1, b2 = 0.5, 0.999
     # load pretrained models if specified in the config
     self.model, log_string = load_pretrained_vaes(config=self.config,
                                                   model=self.model)
     self.logger.debug(log_string)
     # check if there are latent layers
     self.add_latent_layer = self.config['variational'].get('num_latent_layer', 0) > 0
     # check if only the latent layers should be updated
     self.only_latent_layer = bool(self.config['optimization'].get('only_latent_layer', False))
     if self.only_latent_layer:
         self.critic_iter = 1
         self.logger.debug(
             "critic_iter set to 1 since only_latent_layer is True.")
         self.optimizer_Lin = torch.optim.Adam(
             itertools.chain(self.model.netG_A.latent_layer.parameters(),
                             self.model.netG_B.latent_layer.parameters()),
             lr=self.config["learning_rate"],
             betas=(b1, b2))
         self.logger.debug(
             "Only latent layers are optimized\nNumber of latent layers: {}"
             .format(self.config['variational']['num_latent_layer']))
     else:
         # use ADAM optimizer
         self.optimizer_G = torch.optim.Adam(
             itertools.chain(self.model.netG_A.parameters(),
                             self.model.netG_B.parameters()),
             lr=self.learning_rate,
             betas=(b1, b2))
     # check if there is a different learning rate for the discriminators
     D_lr_factor = self.config["optimization"].get("D_lr_factor", 1)
     self.optimizer_D = torch.optim.Adam(
         itertools.chain(self.model.netD_A.parameters(),
                         self.model.netD_B.parameters()),
         lr=D_lr_factor * self.config["learning_rate"],
         betas=(b1, b2))
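Note that when only_latent_layer is set, no optimizer_G is created, so generator-side updates have to go through optimizer_Lin. A minimal sketch of the branching this implies (an assumption; the actual step logic is elsewhere in the iterator):

# Sketch only, not part of the source; `generator_loss` is a placeholder tensor.
optimizer = self.optimizer_Lin if self.only_latent_layer else self.optimizer_G
optimizer.zero_grad()
generator_loss.backward()
optimizer.step()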