from types import SimpleNamespace

import torch
import torch.nn as nn
import torch.optim as optim

import discriminator
import utility


class AdversarialLoss(nn.Module):
    def __init__(self, logger, use_cpu=False, num_gpu=1, gan_type='WGAN_GP',
                 gan_k=1, lr_dis=1e-4, train_crop_size=40):
        super(AdversarialLoss, self).__init__()
        self.logger = logger
        self.gan_type = gan_type
        self.gan_k = gan_k
        self.device = torch.device('cpu' if use_cpu else 'cuda')
        # Discriminator input resolution is 4x the LR training crop
        # (presumably the SR output size).
        self.discriminator = discriminator.Discriminator(train_crop_size * 4).to(self.device)
        if num_gpu > 1:
            self.discriminator = nn.DataParallel(self.discriminator, list(range(num_gpu)))
        if gan_type in ['WGAN_GP', 'GAN']:
            # betas=(0, 0.9) follows the WGAN-GP paper (arXiv:1704.00028).
            self.optimizer = optim.Adam(self.discriminator.parameters(),
                                        betas=(0, 0.9), eps=1e-8, lr=lr_dis)
        else:
            raise SystemExit('Error: no such type of GAN!')
        self.bce_loss = torch.nn.BCELoss().to(self.device)
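# Hedged sketch (not part of the class above): a standard WGAN-GP gradient
# penalty, following arXiv:1704.00028, which a loss like this would apply
# during the discriminator update. The function name and the usual penalty
# weight of 10 (applied by the caller) are assumptions, not taken from the
# snippet.
def gradient_penalty(discriminator, real, fake, device):
    batch = real.size(0)
    # Per-sample interpolation weights for NCHW image batches.
    alpha = torch.rand(batch, 1, 1, 1, device=device)
    hat = (alpha * real + (1 - alpha) * fake.detach()).requires_grad_(True)
    d_hat = discriminator(hat)
    # Gradient of the critic output w.r.t. the interpolated inputs.
    gradients = torch.autograd.grad(
        outputs=d_hat.sum(), inputs=hat,
        create_graph=True, retain_graph=True
    )[0]
    # Penalize deviation of the per-sample gradient norm from 1.
    gradient_norm = gradients.view(batch, -1).norm(2, dim=1)
    return ((gradient_norm - 1) ** 2).mean()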
class Adversarial(nn.Module):
    def __init__(self, args, gan_type):
        super(Adversarial, self).__init__()
        self.gan_type = gan_type
        self.gan_k = args.gan_k
        self.discriminator = discriminator.Discriminator(args, gan_type)
        if gan_type != 'WGAN_GP':
            self.optimizer = utility.make_optimizer(args, self.discriminator)
        else:
            # WGAN-GP bypasses the configured optimizer and uses the Adam
            # settings recommended for WGAN-GP training.
            self.optimizer = optim.Adam(
                self.discriminator.parameters(),
                betas=(0, 0.9), eps=1e-8, lr=1e-5
            )
        self.scheduler = utility.make_scheduler(args, self.optimizer)
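# Hedged sketch (assumed usage, not from the class above): gan_k typically
# sets how many discriminator optimization steps are taken per generator
# step. Tensor names and the WGAN-style loss are illustrative placeholders;
# for WGAN-GP, the gradient penalty sketched earlier would be added in,
# scaled by its weight.
def update_discriminator(adv, fake, real):
    for _ in range(adv.gan_k):
        adv.optimizer.zero_grad()
        # detach() keeps this update from backpropagating into the generator.
        d_fake = adv.discriminator(fake.detach())
        d_real = adv.discriminator(real)
        # Wasserstein critic loss estimate.
        loss_d = d_fake.mean() - d_real.mean()
        loss_d.backward()
        adv.optimizer.step()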
class Adversarial(nn.Module):
    def __init__(self, args, gan_type):
        super(Adversarial, self).__init__()
        self.gan_type = gan_type
        self.gan_k = args.gan_k
        self.aprx_epochs = args.aprx_epochs
        self.aprx_training_dir = args.aprx_training_dir
        self.aprx_training_dir_HR = args.aprx_training_dir_HR
        self.batch_size = args.batch_size
        self.patch_size = args.patch_size
        self.discriminator = discriminator.Discriminator(args, gan_type)
        self.optimizer = utility.make_optimizer(args, self.discriminator)
        self.scheduler = utility.make_scheduler(args, self.optimizer)
        self.a_counter = 0
class Adversarial(nn.Module):
    def __init__(self, args, gan_type):
        super(Adversarial, self).__init__()
        self.gan_type = gan_type
        self.gan_k = args.gan_k
        self.dis = discriminator.Discriminator(args)
        if gan_type == 'WGAN_GP':
            # WGAN-GP optimizer settings; see https://arxiv.org/pdf/1704.00028.pdf, p. 4.
            optim_dict = {
                'optimizer': 'ADAM',
                'betas': (0, 0.9),
                'epsilon': 1e-8,
                'lr': 1e-5,
                'weight_decay': args.weight_decay,
                'decay': args.decay,
                'gamma': args.gamma,
            }
            # SimpleNamespace gives the dict attribute access, matching the
            # interface make_optimizer expects from args.
            optim_args = SimpleNamespace(**optim_dict)
        else:
            optim_args = args
        self.optimizer = utility.make_optimizer(optim_args, self.dis)
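# Hedged sketch (illustrative only, not the repo's actual
# utility.make_optimizer): the SimpleNamespace wrap above works because the
# helper reads its configuration as attributes, e.g. along these lines.
def make_optimizer_sketch(opt_args, target):
    if opt_args.optimizer == 'ADAM':
        return optim.Adam(
            target.parameters(),
            lr=opt_args.lr,
            betas=opt_args.betas,
            eps=opt_args.epsilon,
            weight_decay=opt_args.weight_decay,
        )
    raise NotImplementedError(opt_args.optimizer)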