def init_loss(self, opt):
    """Create the GAN/identity/feature/map criteria and both Adam optimizers.

    Args:
        opt: parsed options; reads ganloss, restruction_loss, map_m_type,
            lr, beta1, beta2.

    Raises:
        ValueError: if ``opt.ganloss`` is neither 'gan' nor 'lsgan'.
    """
    Base_Model.init_loss(self, opt)
    # #####################
    # define loss functions
    # #####################
    # GAN loss: 'gan' -> vanilla (BCE-style) objective, 'lsgan' -> least-squares
    if opt.ganloss == 'gan':
        self.criterionGAN = GANLoss(use_lsgan=False).to(self.device)
    elif opt.ganloss == 'lsgan':
        self.criterionGAN = GANLoss(use_lsgan=True).to(self.device)
    else:
        # Fail fast with the offending value instead of a bare ValueError().
        raise ValueError(
            "Unsupported ganloss '{}'; expected 'gan' or 'lsgan'".format(
                opt.ganloss))
    # identity (reconstruction) loss
    self.criterionIdt = RestructionLoss(opt.restruction_loss).to(self.device)
    # feature metric loss
    self.criterionFea = torch.nn.L1Loss()
    # map loss
    self.criterionMap = RestructionLoss(opt.map_m_type).to(self.device)
    # #####################
    # initialize optimizers
    # #####################
    self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                        lr=opt.lr,
                                        betas=(opt.beta1, opt.beta2))
    self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                        lr=opt.lr,
                                        betas=(opt.beta1, opt.beta2))
    self.optimizers = [self.optimizer_G, self.optimizer_D]
def init_loss(self, opt):
    """Set up the identity/map reconstruction losses and the generator optimizer."""
    Base_Model.init_loss(self, opt)
    # #####################
    # define loss functions
    # #####################
    # identity (reconstruction) loss
    self.criterionIdt = RestructionLoss(
        opt.idt_loss, opt.idt_reduction).to(self.device)
    # map (projection) loss
    self.criterionMap = RestructionLoss(
        opt.map_projection_loss).to(self.device)
    # #####################
    # initialize optimizers
    # #####################
    # NOTE(review): the flag is read from self.opt rather than the opt
    # argument — presumably the same object; confirm against the caller.
    adam_kwargs = dict(lr=opt.lr, betas=(opt.beta1, opt.beta2))
    if self.opt.weight_decay_if:
        # optional fixed L2 penalty on the generator weights
        adam_kwargs['weight_decay'] = 1e-4
    self.optimizer_G = torch.optim.Adam(self.netG.parameters(), **adam_kwargs)
    self.optimizers = [self.optimizer_G]
def init_loss(self, opt):
    """Build the GAN/identity/feature/map criteria and both Adam optimizers."""
    Base_Model.init_loss(self, opt)
    # #####################
    # define loss functions
    # #####################
    # adversarial loss (least-squares variant)
    self.criterionGAN = GANLoss(use_lsgan=True).to(self.device)
    # identity (reconstruction) loss
    self.criterionIdt = RestructionLoss(
        opt.idt_loss, opt.idt_reduction).to(self.device)
    # feature metric loss
    self.criterionFea = torch.nn.L1Loss().to(self.device)
    # map (projection) loss
    self.criterionMap = RestructionLoss(
        opt.map_projection_loss).to(self.device)
    # #####################
    # initialize optimizers
    # #####################
    betas = (opt.beta1, opt.beta2)
    self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                        lr=opt.lr, betas=betas)
    self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                        lr=opt.lr, betas=betas)
    self.optimizers = [self.optimizer_G, self.optimizer_D]
def init_loss(self, opt):
    """Define the MSE feature-metric criterion and the generator optimizer."""
    Base_Model.init_loss(self, opt)
    # feature metric loss
    self.criterion = torch.nn.MSELoss()
    # Adam with a fixed L2 penalty on the generator weights
    self.optimizer_G = torch.optim.Adam(
        self.netG.parameters(),
        lr=opt.lr,
        betas=(opt.beta1, opt.beta2),
        weight_decay=1e-4,
    )
    self.optimizers = [self.optimizer_G]
def init_loss(self, opt):
    """Create the GAN/identity/feature/map criteria and both Adam optimizers.

    Args:
        opt: parsed options; reads ganloss, restruction_loss, map_m_type,
            lr, beta1, beta2 (self.opt supplies CT_MIN_MAX / XRAY1_MIN_MAX
            value ranges for the map loss).

    Raises:
        ValueError: if ``opt.ganloss`` or ``opt.restruction_loss`` is not
            one of the supported values.
    """
    Base_Model.init_loss(self, opt)
    # #####################
    # define loss functions
    # #####################
    # GAN loss
    if opt.ganloss == 'gan':
        self.criterionGAN = GANLoss(use_lsgan=False).to(self.device)
    elif opt.ganloss == 'lsgan':
        self.criterionGAN = GANLoss(use_lsgan=True).to(self.device)
    elif opt.ganloss == 'wgan':
        self.criterionGAN = WGANLoss(grad_penalty=False).to(self.device)
    elif opt.ganloss == 'wgan_gp':
        self.criterionGAN = WGANLoss(grad_penalty=True).to(self.device)
    else:
        # Fail fast with the offending value instead of a bare ValueError().
        raise ValueError(
            "Unsupported ganloss '{}'; expected 'gan', 'lsgan', "
            "'wgan' or 'wgan_gp'".format(opt.ganloss))
    # identity (reconstruction) loss
    if opt.restruction_loss == 'mse':
        print('Restruction loss: MSE')
        self.criterionIdt = torch.nn.MSELoss()
    elif opt.restruction_loss == 'l1':
        print('Restruction loss: l1')
        self.criterionIdt = torch.nn.L1Loss()
    else:
        raise ValueError(
            "Unsupported restruction_loss '{}'; expected 'mse' or 'l1'".format(
                opt.restruction_loss))
    # feature metric loss
    self.criterionFea = torch.nn.L1Loss()
    # map loss — transitions between the CT and X-ray value ranges
    self.criterionMap = Map_loss(direct_mean=opt.map_m_type,
                                 predict_transition=self.opt.CT_MIN_MAX,
                                 gt_transition=self.opt.XRAY1_MIN_MAX).to(
        self.device)
    # #####################
    # initialize optimizers
    # #####################
    self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                        lr=opt.lr,
                                        betas=(opt.beta1, opt.beta2))
    self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                        lr=opt.lr,
                                        betas=(opt.beta1, opt.beta2))
    self.optimizers = [self.optimizer_G, self.optimizer_D]