def __init__(self, opt):
    """Initialize the super-resolution and colorization model.

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
    """
    self.opt = opt
    # define networks (two generators: super-resolution A -> C and colorization C -> B;
    # the class names are looked up from the option strings via eval)
    self.netG_A2C = eval(opt.SRModel)(1, 1, opt.up).to(opt.device)
    self.netG_C2B = eval(opt.CModel)(1, 3).to(opt.device)
    # define loss functions
    self.criterionSR = losses.L1Loss()
    self.criterionC = losses.L1Loss()
    self.criterionPSNR = losses.PSNRLoss()
    # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
    self.optimizers = []
    self.optimizer_G = torch.optim.Adam(self.netG_A2C.parameters(), lr=opt.lr)
    # note: despite the name, optimizer_D updates the colorization generator netG_C2B
    self.optimizer_D = torch.optim.Adam(self.netG_C2B.parameters(), lr=opt.lr)
    self.optimizers.append(self.optimizer_G)
    self.optimizers.append(self.optimizer_D)
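# Since eval(opt.SRModel) resolves a class by name at runtime, this constructor only
# needs `opt` to carry the right attribute names. A minimal sketch of the expected
# flags follows; the class names ('MySRNet', 'MyColorNet') and the wrapper class
# SRColorModel are hypothetical placeholders, and in the repo these flags come from
# a BaseOptions subclass rather than being built by hand.
import argparse
import torch

opt = argparse.Namespace(
    SRModel='MySRNet',    # class name resolved via eval(); must be in scope
    CModel='MyColorNet',  # ditto for the colorization generator
    up=2,                 # upscaling factor passed to the SR generator
    lr=2e-4,
    device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
)
model = SRColorModel(opt)  # placeholder name for the class defining the __init__ above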
def __init__(self, opt):
    """Initialize the CycleGAN class.

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
    """
    self.opt = opt
    self.loss_names = [
        'D_A', 'G_A', 'cycle_A', 'idt_A',
        'D_B', 'G_B', 'cycle_B', 'idt_B'
    ]
    # define networks (both generators and discriminators)
    # The naming is different from that used in the paper.
    # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
    self.netG_A = RDDBNetB(1, 3, 64, nb=1, mode=opt.mode).to(opt.device)
    self.netG_B = RDDBNetA(3, 1, 64, nb=1, mode=opt.mode).to(opt.device)
    self.netD_A = NLayerDiscriminator(3, 64, 3).to(opt.device)
    self.netD_B = NLayerDiscriminator(1, 64, 3).to(opt.device)
    self.fake_A_pool = ImagePool(opt.pool_size)
    self.fake_B_pool = ImagePool(opt.pool_size)
    # define loss functions
    self.criterionGAN = GANLoss(gan_mode='DSSIM', device=opt.device)  # define GAN loss
    self.criterionCycle = losses.L1Loss()
    self.criterionIdt = losses.L1Loss()
    # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
    self.optimizers = []
    self.optimizer_G = torch.optim.Adam(
        itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
        lr=opt.lr, betas=(opt.beta1, 0.999))
    self.optimizer_D = torch.optim.Adam(
        itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),
        lr=opt.lr, betas=(opt.beta1, 0.999))
    self.optimizers.append(self.optimizer_G)
    self.optimizers.append(self.optimizer_D)
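# For context, the fake_A_pool / fake_B_pool buffers follow the CycleGAN trick of
# updating the discriminators with a history of generated images rather than only
# the most recent batch. Below is a minimal sketch of the query semantics, modeled
# on the pytorch-CycleGAN-and-pix2pix ImagePool; this is an assumption about the
# class used above, not this repo's exact implementation.
import random
import torch

class ImagePool:
    """Buffer of previously generated images (sketch)."""

    def __init__(self, pool_size):
        self.pool_size = pool_size
        self.images = []

    def query(self, images):
        if self.pool_size == 0:                       # buffer disabled: pass through
            return images
        out = []
        for img in images:
            img = img.unsqueeze(0)
            if len(self.images) < self.pool_size:     # fill the buffer first
                self.images.append(img)
                out.append(img)
            elif random.random() < 0.5:               # 50%: return a stored image
                idx = random.randrange(self.pool_size)
                out.append(self.images[idx])
                self.images[idx] = img                # stash the new one in its place
            else:                                     # 50%: return the new image
                out.append(img)
        return torch.cat(out, 0)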
def __init__(self, opt):
    """Initialize the CycleGAN class.

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
    """
    self.opt = opt
    self.loss_names = [
        'D_A', 'G_A', 'cycle_A', 'iden_A',
        'D_B', 'G_B', 'cycle_B', 'iden_B'
    ]
    # define networks (both generators and discriminators)
    # The naming is different from that used in the paper.
    # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
    if opt.net == 'SRdens':
        self.netG_A = SRDenseNetA(1, 3, mode=opt.mode, num_blocks=2, num_layers=2).to(opt.device)
        self.netG_B = SRDenseNetB(3, 1, mode=opt.mode, num_blocks=2, num_layers=2).to(opt.device)
        self.netD_A = NLayerDiscriminator(3, 64, 2).to(opt.device)
        self.netD_B = NLayerDiscriminator(1, 64, 2).to(opt.device)
    elif self.opt.net == '1':
        self.netG_A = RDDBNetB(3, 3, 64, nb=3, mode=opt.mode).to(opt.device)
        self.netG_B = RDDBNetA(3, 3, 64, nb=3, mode=opt.mode).to(opt.device)
        self.netD_A = NLayerDiscriminator(3, 64, 2).to(opt.device)
        self.netD_B = NLayerDiscriminator(3, 64, 2).to(opt.device)
    elif self.opt.net == '2':
        # extra single-channel generator, only defined for this configuration
        self.netG_C = SRDenseNetA(1, 1, mode=opt.mode, num_blocks=2, num_layers=2).to(opt.device)
        self.netD_A = NLayerDiscriminator(3, 64, 2).to(opt.device)
        self.netD_B = NLayerDiscriminator(1, 64, 2).to(opt.device)
        self.netG_A = define_G(1, 3, opt.ngf, opt.netG, opt.norm,
                               not opt.no_dropout, opt.init_type, opt.init_gain).to(opt.device)
        self.netG_B = define_G(3, 1, opt.ngf, opt.netG, opt.norm,
                               not opt.no_dropout, opt.init_type, opt.init_gain).to(opt.device)
    else:
        self.netG_A = RDDBNetB(1, 3, 64, nb=3, mode=opt.mode).to(opt.device)
        self.netG_B = RDDBNetA(3, 1, 64, nb=3, mode=opt.mode).to(opt.device)
        self.netD_A = NLayerDiscriminator(3, 64, 2).to(opt.device)
        self.netD_B = NLayerDiscriminator(1, 64, 2).to(opt.device)
    self.fake_A_pool = ImagePool(opt.pool_size)
    self.fake_B_pool = ImagePool(opt.pool_size)
    # define loss functions
    self.criterionGAN = GANLoss(gan_mode='lsgan', device=opt.device)  # define GAN loss
    self.criterion = losses.MSELoss()
    self.criterionCycle = losses.L1Loss()
    self.criterionIdt = losses.L1Loss()
    # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
    self.optimizers = []
    # netG_C only exists when opt.net == '2'; chaining it unconditionally would
    # raise AttributeError for every other branch, so include it only if present
    g_params = [self.netG_A.parameters(), self.netG_B.parameters()]
    if hasattr(self, 'netG_C'):
        g_params.append(self.netG_C.parameters())
    self.optimizer_G = torch.optim.Adam(itertools.chain(*g_params),
                                        lr=opt.lr, betas=(opt.beta1, 0.999))
    self.optimizer_D = torch.optim.Adam(
        itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),
        lr=1e-5, betas=(opt.beta1, 0.999))  # discriminator lr is fixed at 1e-5, not opt.lr
    self.optimizers.append(self.optimizer_G)
    self.optimizers.append(self.optimizer_D)
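# This variant uses gan_mode='lsgan', i.e. a least-squares GAN objective: the
# discriminator output map is regressed with MSE toward 1.0 for real inputs and
# 0.0 for fakes. A minimal sketch of that branch, assuming the usual
# pytorch-CycleGAN-and-pix2pix GANLoss semantics (not this repo's exact class):
import torch
import torch.nn as nn

class GANLoss(nn.Module):
    """LSGAN-style loss (sketch, lsgan branch only)."""

    def __init__(self, gan_mode='lsgan', device='cpu',
                 target_real_label=1.0, target_fake_label=0.0):
        super().__init__()
        assert gan_mode == 'lsgan', 'sketch covers only the lsgan branch'
        self.loss = nn.MSELoss()  # least-squares GAN objective
        self.real_label = torch.tensor(target_real_label, device=device)
        self.fake_label = torch.tensor(target_fake_label, device=device)

    def forward(self, prediction, target_is_real):
        # compare the discriminator map against an all-real or all-fake target
        target = self.real_label if target_is_real else self.fake_label
        return self.loss(prediction, target.expand_as(prediction))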