Example No. 1
 def get_next_real_img(self):
     data = next(self.loader_iter, None)
     # The iterator over the current loader is exhausted; renew it and retry
     if data is None:
         logger_.info("Renew dataset.")
         self.renew_loader_iter()
         data = next(self.loader_iter)
     real_img = data[0].to(self.device)
     return real_img
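
renew_loader_iter is referenced here but not shown in these examples. A minimal sketch of what it presumably does, assuming the DataLoader is stored in self.__loader on the same class, as in Example No. 6:

 def renew_loader_iter(self):
     # Hypothetical sketch: restart iteration over the stored DataLoader
     self.loader_iter = iter(self.__loader)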
Example No. 2
 def save_img_if_necessary(self, step_id, cur_local_step, real_img):
     if self.cur_global_step % self.args.save_img_per_iter == 0:
         self.save_synthetic_image(step_id)
         logger_.info(
             f"[{self.cur_global_step}] Save a grid of synthetic images")
     if cur_local_step == 0:
         self.save_real_image(real_img, step_id)
         logger_.info(
             f"[{self.cur_global_step}] Save a grid of real images")
Example No. 3
 @classmethod
 def check_structure(cls, config, max_step_idx, device):
     g = cls(config, device)
     logger_.info("Registered modules in generator.")
     logger_.info(g)
     for step_idx in range(max_step_idx + 1):
         logger_.info(f"[G] Summary in step {step_idx}")
         logger_.info(f"\n{summary(g, g.sample_noise(), step_idx=step_idx)}")
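
With the @classmethod decorator, check_structure is invoked on the class itself; a hypothetical usage, mirroring the commented-out call in Example No. 7:

# Logs the generator layout and a per-step summary for steps 0..6
g.Generator.check_structure(config["g"], max_step_idx=6, device=device)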
Example No. 4
 def logging_if_necessary(self, cur_step, cur_local_step, num_iter, loss_d,
                          loss_g):
     # Output training stats
     if self.cur_global_step % self.args.logging_per_iter == 0:
         logger_.info(
             '[%d][%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f' %
             (self.cur_global_step, cur_step, self.args.num_steps,
              cur_local_step, num_iter, loss_d.item(), loss_g.item()))
         writer_.add_scalar(tag="err/d",
                            scalar_value=loss_d.item(),
                            global_step=self.cur_global_step)
         writer_.add_scalar(tag="err/g",
                            scalar_value=loss_g.item(),
                            global_step=self.cur_global_step)
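
writer_ is used like a torch.utils.tensorboard.SummaryWriter; a minimal sketch of a module-level setup (the log directory below is an assumption, not from the source):

from torch.utils.tensorboard import SummaryWriter

# Hypothetical writer; add_scalar(tag=..., scalar_value=..., global_step=...)
# matches the calls above
writer_ = SummaryWriter(log_dir="./files/output/tensorboard")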
Example No. 5
 @classmethod
 def check_structure(cls, config, max_step_idx, device):
     init_img_size = 4
     d = cls(config, device)
     logger_.info("Registered modules in discriminator")
     logger_.info(d)
     for step_idx in range(max_step_idx + 1):
         img_size = init_img_size * 2**step_idx
         noise = torch.randn(1, 1, img_size, img_size)
         logger_.info(f"[D] Summary in step {step_idx}")
         logger_.info(f"\n{summary(d, noise, step_idx=step_idx)}")
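
As with the generator, a hypothetical usage mirroring the commented-out call in Example No. 7:

# Logs the discriminator layout and a per-step summary for steps 0..6
d.Discriminator.check_structure(config["d"], max_step_idx=6, device=device)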
Example No. 6
 def set_loader(self, step_idx):
     seed.seed_everything(local_seed=step_idx)
     image_size = int(4 * 2**step_idx)
     dataset = dset.ImageFolder(
         root="./files/input/dataset",
         transform=transforms.Compose([
             transforms.Resize(image_size),
             transforms.CenterCrop(image_size),
             transforms.ToTensor(),
             # pixel values are converted from [0, 1] to [-1, 1]
             transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
         ]))
     loader = torch.utils.data.DataLoader(
         dataset,
         batch_size=self.args.batch_size_list[step_idx],
         shuffle=True)
     self.__loader = loader
     self.renew_loader_iter()
     logger_.info(
         f"Set loader with batch size: {self.args.batch_size_list[step_idx]}"
     )
     logger_.info(f"Number of batches: {len(self.__loader)}")
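
With image_size = int(4 * 2**step_idx), the resolution doubles at every step; under this schedule the max_step_idx=6 used in the commented-out structure checks corresponds to 256 x 256 images. For illustration:

# step_idx: 0  1   2   3   4    5    6
# size:     4  8  16  32  64  128  256
for step_idx in range(7):
    print(step_idx, int(4 * 2**step_idx))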
Example No. 7
def main():
    args, config = initialization()
    import sys
    from common.logger import logger_
    from gans.model.apgan.apgan_gp import APGANGP
    from gans.model.apgan import discriminator as d
    from gans.model.apgan import generator as g
    from gans.loss.gan_loss import GANLoss

    # Set device
    logger_.info("*** SET DEVICE ***")
    device = "cpu" if args.use_gpu == 0 else "cuda"
    logger_.info(f"Device is {device}")

    # Create generator and its optimizer
    logger_.info("*** CREATE GENERATOR ***")
    netG = g.Generator(config["g"], device).to(device)
    logger_.info(f"Optimizer: {netG.optimizer}")
    logger_.info("*** CHECK GENERATOR STRUCTURE ***")
    # g.Generator.check_structure(config["g"], max_step_idx=6, device=device)

    # Create the Discriminator and its optimizer
    logger_.info("*** CREATE DISCRIMINATOR ***")
    netD = d.Discriminator(config["d"], device).to(device)
    logger_.info(f"Optimizer: {netD.optimizer}")
    logger_.info("*** CHECK DISCRIMINATOR STRUCTURE ***")
    # d.Discriminator.check_structure(config["d"], max_step_idx=6, device=device)

    # Loss function
    logger_.info("*** CREATE LOSS FUNCTION ***")
    criterion = GANLoss(loss_type=args.loss_f_type, device=device)
    logger_.info(criterion)

    # criterion = nn.BCELoss()
    logger_.info("*** CREATE GAN NETWORK ***")
    gan = APGANGP(netG, netD, criterion, args, config, device)

    # Ensure model weights are single-precision (float32)
    gan.g = gan.g.float()
    gan.d = gan.d.float()

    # Training
    logger_.info("*** TRAINING ***")
    logger_.info("Starting Training Loop...")
    gan.train(step_from=args.step_from, num_steps=args.num_steps)

    # Save model
    logger_.info("*** SAVE WEIGHTS ***")
    gan.save_model()
    logger_.info("FINISH.")
    sys.exit(0)
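
initialization() is not shown in these examples. A hedged sketch of the shape it must have, given the attributes accessed above; every flag name and default below is inferred from usage, and the JSON config format is an assumption:

import argparse
import json

def initialization():
    # Hypothetical sketch, not the project's actual implementation
    parser = argparse.ArgumentParser()
    parser.add_argument("--use_gpu", type=int, default=0)
    parser.add_argument("--loss_f_type", type=str, default="wgangp")
    parser.add_argument("--step_from", type=int, default=0)
    parser.add_argument("--num_steps", type=int, default=6)
    parser.add_argument("--config_path", type=str, default="config.json")
    # ...plus the other flags used across these examples
    # (batch_size_list, logging_per_iter, save_*_per_iter, save_root_path, ...)
    args = parser.parse_args()
    with open(args.config_path) as f:
        config = json.load(f)  # expected to hold the "g" and "d" sections
    return args, config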
Example No. 8
 def train(self, step_from, num_steps):
     for i in range(step_from, num_steps + 1):  # num_steps is inclusive
         logger_.info(f"*** START TRAIN STEP {i} ***")
         self.set_loader(step_idx=i)
         self.__train_step(cur_step=i)
Example No. 9
 def save_model(self, suffix=""):
     target_root_path = f"{self.args.save_root_path}/model/{self.args.save_key}"
     f_op.create_folder(target_root_path)
     torch.save(self.g.state_dict(), f"{target_root_path}/g{suffix}.ptn")
     # torch.save(self.d.state_dict(), f"{target_root_path}/d{suffix}.ptn")
     logger_.info(f"Saved in {target_root_path}")
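
The counterpart for reuse: the saved state_dict can be restored with standard PyTorch calls. The names below follow Example No. 7, and an empty suffix yields g.ptn:

# Hypothetical reload of the generator saved above
state = torch.load(f"{target_root_path}/g.ptn", map_location=device)
netG.load_state_dict(state)
netG.eval()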
Example No. 10
 def save_model_if_necessary(self, step_id, cur_local_step):
     # save_model_per_iter == -1 disables periodic checkpointing
     if self.args.save_model_per_iter != -1:
         if self.cur_global_step % self.args.save_model_per_iter == 0:
             suffix = f"_{step_id}_{self.cur_global_step}_{cur_local_step}"
             self.save_model(suffix)
             logger_.info(f"[{self.cur_global_step}] Save model")
Example No. 11
 def _initialization(self):
     # Recursively apply the weight initializer to every submodule
     self.apply(self._initialize_weight)
     for k, v in self.initialized_layers.items():
         logger_.info(f"Num of initialized {k}: {v}")
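
_initialize_weight itself is not shown. A sketch under the assumption that it applies a DCGAN-style init and tallies counts into initialized_layers; the details are hypothetical:

 def _initialize_weight(self, m):
     # Hypothetical: normal(0, 0.02) for conv layers, counted per class name
     classname = m.__class__.__name__
     if classname.startswith("Conv"):
         torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
         self.initialized_layers[classname] = \
             self.initialized_layers.get(classname, 0) + 1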