def __init__(self, opt):
    """Initialize this model class.

    Parameters:
        opt -- training/test options

    A few things can be done here.
    - (required) call the initialization function of BaseModel
    - define loss function, visualization images, model names, and optimizers
    """
    BaseModel.__init__(self, opt)  # call the initialization method of BaseModel
    # Specify the training losses you want to print out. The program will call
    # base_model.get_current_losses, which resolves each entry as
    # getattr(self, 'loss_' + name) — so entries must NOT carry the 'loss_'
    # prefix themselves. Fixed from ['loss_G'] (which would resolve
    # self.loss_loss_G) to ['G'], consistent with the other models in this
    # file (e.g. ['D_real', 'D_fake', 'D_gp', 'G', 'D']).
    self.loss_names = ['G']
    # Specify the images you want to save and display. The program will call
    # base_model.get_current_visuals to save and display these images.
    self.visual_names = ['data_A', 'data_B', 'output']
    # Specify the models you want to save to the disk. The program will call
    # base_model.save_networks and base_model.load_networks to save and load networks.
    # You can use opt.isTrain to specify different behaviors for training and
    # test. For example, some networks will not be used during test, and you
    # don't need to load them.
    self.model_names = ['G']
    # Define networks; you can use opt.isTrain to specify different behaviors
    # for training and test.
    self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                  opt.netG, gpu_ids=self.gpu_ids)
    if self.isTrain:  # only defined during training time
        # Define your loss functions. You can use losses provided by torch.nn
        # such as torch.nn.L1Loss. We also provide a GANLoss class
        # "networks.GANLoss".
        self.criterionGAN = networks.GANLoss().to(self.device)
        self.criterionLoss = torch.nn.L1Loss()
        # Define and initialize optimizers. You can define one optimizer for
        # each network. If two networks are updated at the same time, you can
        # use itertools.chain to group them. See cycle_gan_model.py for an
        # example.
        self.optimizer = torch.optim.Adam(self.netG.parameters(),
                                          lr=opt.lr, betas=(opt.beta1, 0.999))
        self.optimizers = [self.optimizer]
def __init__(self, opt):
    """Initialize the GAN model: losses, networks, criteria, and optimizers.

    Parameters:
        opt -- training/test options (a project Options object)
    """
    BaseModel.__init__(self, opt)  # required: sets device, gpu_ids, optimizers, ...
    self.output = None  # holds the most recent generator output
    # Loss names reported by base_model.get_current_losses (as 'loss_' + name).
    self.loss_names = [
        'G_real', 'G_fake', 'D_real', 'D_fake', 'D_gp', 'G', 'D'
    ]
    if self.isTrain:  # only defined during training time
        self.model_names = ['G', 'D']
    else:
        # At test time only the generator is saved/loaded.
        self.model_names = ['G']
    # Define networks. Use self.gpu_ids (set by BaseModel) for consistency
    # with the sibling models in this file, instead of self.opt.gpu_ids.
    self.netG = networks.define_G(opt, self.gpu_ids)
    if self.isTrain:
        self.netD = networks.define_D(opt, self.gpu_ids)
        # Define loss functions; GANLoss is parameterized by loss mode,
        # network role ('G'/'D'), and the discriminator variant.
        self.criterionG = GANLoss(opt.g_loss_mode, 'G',
                                  opt.which_D).to(self.device)
        self.criterionD = GANLoss(opt.d_loss_mode, 'D',
                                  opt.which_D).to(self.device)
        # Initialize one optimizer per network; get_optimizer maps the
        # opt.optim_type string to an optimizer class.
        self.optimizer_G = get_optimizer(opt.optim_type)(
            self.netG.parameters(), lr=opt.lr_g)
        self.optimizer_D = get_optimizer(opt.optim_type)(
            self.netD.parameters(), lr=opt.lr_d)
        self.optimizers.append(self.optimizer_G)
        self.optimizers.append(self.optimizer_D)
def __init__(self, opt):
    """Initialize this model class.

    Parameters:
        opt -- training/test options

    Calls the required BaseModel initializer, then sets up loss/visual/model
    name lists, the networks, the discriminator criterion, the pool of
    generator mutation losses, the optimizers, and the evolutionary
    candidate snapshots.
    """
    BaseModel.__init__(self, opt)  # call the initialization method of BaseModel
    self.output = None  # latest generator output, filled in by forward()
    self.loss_names = ['D_real', 'D_fake', 'D_gp', 'G', 'D']
    self.visual_names = ['real_visual', 'gen_visual']
    # Only the generator is saved/loaded at test time.
    self.model_names = ['G', 'D'] if self.isTrain else ['G']
    # Networks.
    self.netG = networks.define_G(opt, self.gpu_ids)
    if self.isTrain:  # only defined during training time
        self.netD = networks.define_D(opt, self.gpu_ids)
        # Loss functions. The generator criterion is chosen per-step from
        # G_mutations below, so it starts unset here.
        self.criterionG = None  # Will be define by G_mutations
        self.criterionD = GANLoss(opt.d_loss_mode, 'D',
                                  opt.which_D).to(self.device)
        # One GANLoss per configured generator loss mode (the mutation pool).
        self.G_mutations = [
            GANLoss(mode, 'G', opt.which_D).to(self.device)
            for mode in opt.g_loss_mode
        ]
        # Optimizers — one per network, both registered with BaseModel.
        optim_cls = get_optimizer(opt.optim_type)
        self.optimizer_G = optim_cls(self.netG.parameters(), lr=opt.lr_g)
        self.optimizer_D = optim_cls(self.netD.parameters(), lr=opt.lr_d)
        self.optimizers.extend([self.optimizer_G, self.optimizer_D])
        # Evolutionary candidatures setting (init). Note: the list holds the
        # SAME snapshot object repeated candi_num times, as in the original.
        self.G_candis = [copy.deepcopy(self.netG.state_dict())] * opt.candi_num
        self.optG_candis = [copy.deepcopy(self.optimizer_G.state_dict())
                            ] * opt.candi_num
        # Map each generator loss mode string to its index in the pool.
        self.loss_mode_to_idx = dict(
            (mode, idx) for idx, mode in enumerate(opt.g_loss_mode))
def __init__(self, opt):
    """Set up networks, losses, optimizers, and evolutionary candidates.

    Parameters:
        opt -- training/test options
    """
    # Required: BaseModel wires up device, gpu_ids, and the optimizers list.
    BaseModel.__init__(self, opt)
    self.output = None  # most recent generator output
    self.loss_names = ['D_real', 'D_fake', 'D_gp', 'G', 'D']
    self.visual_names = ['real_visual', 'gen_visual']
    if not self.isTrain:
        # Test time: only the generator is needed on disk.
        self.model_names = ['G']
    else:
        self.model_names = ['G', 'D']
    # Networks.
    self.netG = networks.define_G(opt, self.gpu_ids)
    if not self.isTrain:
        return  # everything below is training-only
    self.netD = networks.define_D(opt, self.gpu_ids)
    # Discriminator criterion; the generator criteria live in G_mutations.
    self.criterionD = GANLoss(opt.d_loss_mode, 'D',
                              opt.which_D).to(self.device)
    # One GANLoss per configured generator loss mode (mutation pool).
    self.G_mutations = []
    for g_loss in opt.g_loss_mode:
        self.G_mutations.append(
            GANLoss(g_loss, 'G', opt.which_D).to(self.device))
    # Optimizers, one per network, both registered with BaseModel.
    make_optimizer = get_optimizer(opt.optim_type)
    self.optimizer_G = make_optimizer(self.netG.parameters(), lr=opt.lr_g)
    self.optimizer_D = make_optimizer(self.netD.parameters(), lr=opt.lr_d)
    self.optimizers.append(self.optimizer_G)
    self.optimizers.append(self.optimizer_D)
    # Evolutionary candidatures setting (init). NOTE: as in the original,
    # each list repeats ONE snapshot object candi_num times (shared refs).
    self.G_candis = [copy.deepcopy(self.netG.state_dict())] * opt.candi_num
    self.optG_candis = [copy.deepcopy(self.optimizer_G.state_dict())
                        ] * opt.candi_num
def __init__(self, opt):
    """Initialize the pix2pix class.

    Parameters:
        opt (Option class)-- stores all the experiment flags; needs to be a
            subclass of BaseOptions

    This model is test-only, so no losses or optimizers are created.
    """
    # Guard against accidental use during training.
    assert (not opt.isTrain)
    BaseModel.__init__(self, opt)
    # No losses at test time; get_current_losses will report nothing.
    self.loss_names = []
    # Images saved/displayed via BaseModel.get_current_visuals.
    self.visual_names = ['real_A', 'fake_B']
    # Only the generator is needed; the suffix selects which saved
    # checkpoint BaseModel.load_networks will look for.
    suffix = opt.model_suffix
    self.model_names = ['G' + suffix]  # only generator is needed.
    self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                  opt.netG, opt.norm, not opt.no_dropout,
                                  opt.init_type, opt.init_gain, self.gpu_ids)
    # Assign the model to self.netG_[suffix] so BaseModel.load_networks can
    # find it by its saved name.
    setattr(self, 'netG' + suffix, self.netG)  # store netG in self.