Example #1
    def train_op():
        '''This function will be executed if the model is in training mode'''
        # reduce the learning rate during training if specified
        if ("optimization" in self.config
                and "reduce_lr" in self.config["optimization"]):
            D_lr_factor = self.config["optimization"].get("D_lr_factor", 1)
            current_lr, amplitude_lr = update_learning_rate(
                global_step=self.get_global_step(),
                num_step=self.config["num_steps"],
                reduce_lr=self.config["optimization"]["reduce_lr"],
                learning_rate=self.config["learning_rate"],
                list_optimizer_G=[self.optimizer_G],
                list_optimizer_D=[self.optimizer_D],
                D_lr_factor=D_lr_factor)
            losses["current_learning_rate"] = current_lr
            losses["amplitude_learning_rate"] = amplitude_lr
        # Update the discriminator; its gradients are assumed to have been
        # zeroed in the enclosing step before the losses were computed
        losses["discriminator"]["total"].backward()
        self.optimizer_D.step()
        # Update the generator
        if self.update_G:
            self.model.netG.zero_grad()
            g_losses = self.G_criterion(input_images_G)
            losses["generator"] = g_losses["generator"]
            losses["generator"]["total"].backward()
            self.optimizer_G.step()
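
The examples on this page call an update_learning_rate helper that is not shown; only its keyword arguments and its two return values are visible. The sketch below is one plausible implementation, assuming reduce_lr acts as an on/off flag and the learning rate decays linearly to zero over the second half of training, with the discriminator optimizers scaled by D_lr_factor. The schedule, the meaning of the returned amplitude, and the defaults are assumptions, not taken from the source.

    def update_learning_rate(global_step, num_step, reduce_lr, learning_rate,
                             list_optimizer_G, list_optimizer_D=(), D_lr_factor=1):
        # "amplitude" is the factor applied to the base learning rate
        amplitude = 1.0
        if reduce_lr:
            # decay linearly to zero over the second half of training
            decay_start = num_step // 2
            if global_step > decay_start:
                amplitude = max(0.0, 1.0 - (global_step - decay_start)
                                / max(1, num_step - decay_start))
        current_lr = learning_rate * amplitude
        # write the new rate into every parameter group; discriminator
        # optimizers get the rate scaled by D_lr_factor
        for optimizer in list_optimizer_G:
            for group in optimizer.param_groups:
                group["lr"] = current_lr
        for optimizer in list_optimizer_D:
            for group in optimizer.param_groups:
                group["lr"] = current_lr * D_lr_factor
        return current_lr, amplitude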
Example #2
    def train_op():
        '''This function will be executed if the model is in training mode'''
        # reduce the learning rate during training if specified
        if ("optimization" in self.config
                and "reduce_lr" in self.config["optimization"]):
            current_lr, amplitude_lr = update_learning_rate(
                global_step=self.get_global_step(),
                num_step=self.config["num_steps"],
                reduce_lr=self.config["optimization"]["reduce_lr"],
                learning_rate=self.config["learning_rate"],
                list_optimizer_G=[self.optimizer])
            losses["current_learning_rate"] = current_lr
            losses["amplitude_learning_rate"] = amplitude_lr
        # Update the model on the reconstruction loss
        losses["reconstruction_loss"].backward()
        self.optimizer.step()
Example #3
    def train_op():
        '''This function will be executed if the model is in training mode'''
        # reduce the learning rate during training if specified
        if ("optimization" in self.config
                and "reduce_lr" in self.config["optimization"]):
            D_lr_factor = self.config["optimization"].get("D_lr_factor", 1)
            current_lr, amplitude_lr = update_learning_rate(
                global_step=self.get_global_step(),
                num_step=self.config["num_steps"],
                reduce_lr=self.config["optimization"]["reduce_lr"],
                learning_rate=self.config["learning_rate"],
                list_optimizer_G=[self.optimizer_G],
                list_optimizer_D=[self.optimizer_D],
                D_lr_factor=D_lr_factor)
            losses["current_learning_rate"] = current_lr
            losses["amplitude_learning_rate"] = amplitude_lr

        # Update the generator while the discriminator is frozen
        set_requires_grad([self.model.netD], False)
        self.optimizer_G.zero_grad()
        losses["generator"]["total"].backward()
        self.optimizer_G.step()

        # Update the discriminator; if a "D_accuracy" threshold is configured,
        # only update while its accuracy is below that threshold (with a small
        # random chance of updating anyway)
        if ("optimization" in self.config
                and "D_accuracy" in self.config["optimization"]):
            losses["discriminator"]["update"] = 0
            random_part_A = torch.rand(1)
            if (losses["discriminator"]["accuracy"]
                    < self.config["optimization"]["D_accuracy"]
                    or random_part_A < 0.01):
                set_requires_grad([self.model.netD], True)
                self.optimizer_D.zero_grad()
                losses["discriminator"]["total"].backward()
                self.optimizer_D.step()
                losses["discriminator"]["update"] = 1
            set_requires_grad([self.model.netD], True)
        else:
            set_requires_grad([self.model.netD], True)
            self.optimizer_D.zero_grad()
            losses["discriminator"]["total"].backward()
            self.optimizer_D.step()
            losses["discriminator"]["update"] = 1
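
set_requires_grad is likewise not defined in these snippets. A helper with this name and call pattern commonly just toggles requires_grad for every parameter of the given networks, as in the pytorch-CycleGAN-and-pix2pix utilities; the version below follows that convention and is an assumption about what these examples use.

    def set_requires_grad(nets, requires_grad=False):
        # enable or disable gradient computation for every parameter of the
        # given networks, e.g. to freeze the discriminators while the
        # generators are being updated
        for net in nets:
            if net is not None:
                for param in net.parameters():
                    param.requires_grad = requires_grad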
Example #4
    def train_op():
        '''This function will be executed if the model is in training mode'''
        # reduce the learning rate during training if specified
        if ("optimization" in self.config
                and "reduce_lr" in self.config["optimization"]):
            # when only the latent layers are trained, their optimizer takes
            # the role of the generator optimizer
            optimizer_G = (self.optimizer_Lin if self.only_latent_layer
                           else self.optimizer_G)
            D_lr_factor = self.config["optimization"].get("D_lr_factor", 1)
            current_lr, amplitude_lr = update_learning_rate(
                global_step=self.get_global_step(),
                num_step=self.config["num_steps"],
                reduce_lr=self.config["optimization"]["reduce_lr"],
                learning_rate=self.config["learning_rate"],
                list_optimizer_G=[optimizer_G],
                list_optimizer_D=[self.optimizer_D],
                D_lr_factor=D_lr_factor)
            losses["current_learning_rate"] = current_lr
            losses["amplitude_learning_rate"] = amplitude_lr
        # update the generators while the discriminators are frozen
        if self.update_G and not self.only_latent_layer:
            set_requires_grad([self.model.netD_A, self.model.netD_B], False)
            self.optimizer_G.zero_grad()
            losses["generators"].backward()
            self.optimizer_G.step()
        # train only the linear latent layers, keeping the encoders and
        # decoders frozen
        if self.only_latent_layer:
            set_requires_grad([
                self.model.netG_A.enc, self.model.netG_A.dec,
                self.model.netG_B.enc, self.model.netG_B.dec
            ], False)
            self.optimizer_Lin.zero_grad()
            losses["generators"].backward()
            self.optimizer_Lin.step()
        # update the discriminators
        set_requires_grad([self.model.netD_A, self.model.netD_B], True)
        self.optimizer_D.zero_grad()
        losses["discriminators"].backward()
        self.optimizer_D.step()
Example #5
    def train_op():
        '''This function will be executed if the model is in training mode'''
        # reduce the learning rate during training if specified
        if ("optimization" in self.config
                and "reduce_lr" in self.config["optimization"]):
            D_lr_factor = self.config["optimization"].get("D_lr_factor", 1)
            current_lr, amplitude_lr = update_learning_rate(
                global_step=self.get_global_step(),
                num_step=self.config["num_steps"],
                reduce_lr=self.config["optimization"]["reduce_lr"],
                learning_rate=self.config["learning_rate"],
                list_optimizer_G=[self.optimizer_G],
                list_optimizer_D=[self.optimizer_D_A, self.optimizer_D_B],
                D_lr_factor=D_lr_factor)
            losses["current_learning_rate"] = current_lr
            losses["amplitude_learning_rate"] = amplitude_lr

        # Update the generators while both discriminators are frozen
        set_requires_grad([self.model.netD_A, self.model.netD_B], False)
        if not self.only_latent_layer:
            self.optimizer_G.zero_grad()
            losses["generators"].backward()
            self.optimizer_G.step()
        else:
            # train only the linear latent layers, keeping the encoders
            # and decoders frozen
            set_requires_grad([
                self.model.netG_A.enc, self.model.netG_A.dec,
                self.model.netG_B.enc, self.model.netG_B.dec
            ], False)
            self.optimizer_Lin.zero_grad()
            losses["generators"].backward()
            self.optimizer_Lin.step()

        # Update the discriminators; if "D_accuracy" thresholds are configured,
        # each discriminator is only updated while its accuracy is below its
        # threshold (with a small random chance of updating anyway)
        if ("optimization" in self.config
                and "D_accuracy" in self.config["optimization"]):
            losses["discriminator_sketch"]["update"] = 0
            random_part_A = torch.rand(1)
            if (losses["discriminator_sketch"]["accuracy"]
                    < self.config["optimization"]["D_accuracy"][0]
                    or random_part_A < 0.01):
                set_requires_grad([self.model.netD_A], True)
                self.optimizer_D_A.zero_grad()
                losses["face_cycle"]["disc_total"].backward()
                self.optimizer_D_A.step()
                losses["discriminator_sketch"]["update"] = 1

            losses["discriminator_face"]["update"] = 0
            random_part_B = torch.rand(1)
            if (losses["discriminator_face"]["accuracy"]
                    < self.config["optimization"]["D_accuracy"][1]
                    or random_part_B < 0.01):
                set_requires_grad([self.model.netD_B], True)
                self.optimizer_D_B.zero_grad()
                losses["sketch_cycle"]["disc_total"].backward()
                self.optimizer_D_B.step()
                losses["discriminator_face"]["update"] = 1

            set_requires_grad([self.model.netD_A, self.model.netD_B], True)
        else:
            # no accuracy gating: always update both discriminators
            set_requires_grad([self.model.netD_A, self.model.netD_B], True)
            self.optimizer_D_A.zero_grad()
            self.optimizer_D_B.zero_grad()
            losses["discriminators"].backward()
            self.optimizer_D_A.step()
            self.optimizer_D_B.step()
            losses["discriminator_sketch"]["update"] = 1
            losses["discriminator_face"]["update"] = 1
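
In all five examples, train_op takes no arguments and instead closes over self, losses, and the current batch, which points to an edflow-style iterator whose step function builds the losses and hands the ops back to the framework. The skeleton below sketches that wiring under the assumption of an edflow-like TemplateIterator; the surrounding class, the import, and the log_op/eval_op bodies are illustrative and not part of the examples.

    from edflow import TemplateIterator


    class Iterator(TemplateIterator):
        def step_op(self, model, **kwargs):
            # run the model on the current batch and assemble the losses
            # dict that the closures below read and extend
            losses = {}

            def train_op():
                # backward passes and optimizer steps, as in the examples above
                ...

            def log_op():
                # scalars and images handed to the framework's logger
                return {"scalars": losses, "images": {}}

            def eval_op():
                return {}

            return {"train_op": train_op, "log_op": log_op, "eval_op": eval_op}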