def __init__(self, opt):
    """Initialize the BaseModel class.

    Parameters:
        opt (Option class) -- stores all the experiment flags; needs to be a
            subclass of BaseOptions

    When creating your custom class, you need to implement your own
    initialization. In this function, you should first call
    <BaseModel.__init__(self, opt)>. Then, you need to define four lists:
        -- self.loss_names (str list): specify the training losses that you
            want to plot and save.
        -- self.model_names (str list): define networks used in our training.
        -- self.visual_names (str list): specify the images that you want to
            display and save.
        -- self.optimizer_names (str list): define and initialize optimizers.
            You can define one optimizer for each network. If two networks
            are updated at the same time, you can use itertools.chain to
            group them. See cycle_gan_model.py for an example.
    """
    self.opt = opt
    self.gpu_id = opt.gpu_id
    self.is_train = opt.is_train
    # get device name: CPU or GPU
    self.device = (torch.device(f"cuda:{self.gpu_id}")
                   if self.gpu_id is not None else torch.device("cpu"))
    # save all the checkpoints to save_dir
    self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
    if self.is_train:
        # may prompt the user before creating the directory unless no_confirm
        PromptOnce.makedirs(self.save_dir, not opt.no_confirm)
    self.loss_names = []
    self.model_names = []
    self.visual_names = []
    self.optimizer_names = []  # self.optimizers = []
    self.image_paths = []
    self.metric = 0  # used for learning rate policy 'plateau'
def save(self):
    """Serialize the stored options to ``self.save_file`` as JSON.

    Makes sure the destination directory exists first (prompting once
    unless ``opt.no_confirm`` is set), then writes ``vars(self.opt)``
    pretty-printed with a 4-space indent.
    """
    opt_dict = vars(self.opt)
    parent_dir = os.path.dirname(self.save_file)
    PromptOnce.makedirs(parent_dir, not self.opt.no_confirm)
    with open(self.save_file, "w") as out:
        out.write(json.dumps(opt_dict, indent=4))
def _setup(subfolder_name, create_webpage=True):
    """Prepare the output directory and, optionally, an HTML results page.

    Args:
        subfolder_name: name of the outdir and where the webpage files
            should go
        create_webpage: when True, also build an ``html.HTML`` page inside
            the outdir.

    Returns:
        tuple of (out_dir, webpage); ``webpage`` is None when
        ``create_webpage`` is False.
    """
    out_dir = get_out_dir(subfolder_name)
    PromptOnce.makedirs(out_dir, not opt.no_confirm)
    if not create_webpage:
        return out_dir, None
    page_title = (
        f"Experiment = {opt.name}, Phase = {subfolder_name} inference, "
        f"Loaded Epoch = {opt.load_epoch}"
    )
    return out_dir, html.HTML(out_dir, page_title)
def __init__(self, opt):
    """Initialize the WarpModel. Either in GAN mode or plain cross-entropy mode.

    Args:
        opt: experiment options; must provide body/cloth representation and
            channel counts, GAN settings, and optimizer settings.
    """
    # 3 for RGB; otherwise the number of label channels
    self.body_channels = (opt.body_channels
                          if opt.body_representation == "labels" else 3)
    # 3 for RGB; otherwise the number of label channels
    self.cloth_channels = (opt.cloth_channels
                           if opt.cloth_representation == "labels" else 3)

    # NOTE(review): the section below duplicates the BaseModel/BaseGAN
    # initializer inline (the original had `BaseGAN.__init__(self, opt)` /
    # `super().__init__(opt)` commented out). Confirm why the inline copy was
    # chosen before replacing it with a super() call.
    self.opt = opt
    self.gpu_id = opt.gpu_id
    self.is_train = opt.is_train
    # get device name: CPU or GPU
    self.device = (torch.device(f"cuda:{self.gpu_id}")
                   if self.gpu_id is not None else torch.device("cpu"))
    # save all the checkpoints to save_dir
    self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
    if self.is_train:
        PromptOnce.makedirs(self.save_dir, not opt.no_confirm)
    self.loss_names = []
    self.model_names = []
    self.visual_names = []
    self.optimizer_names = []  # self.optimizers = []
    self.image_paths = []
    self.metric = 0  # used for learning rate policy 'plateau'

    self.net_generator = self.define_G().to(self.device)
    modules.init_weights(self.net_generator, opt.init_type, opt.init_gain)
    self.model_names = ["generator"]

    if self.is_train:
        # setup discriminator
        self.net_discriminator = discriminators.define_D(
            self.get_D_inchannels(), 64, opt.discriminator, opt.n_layers_D,
            opt.norm).to(self.device)
        modules.init_weights(self.net_discriminator, opt.init_type,
                             opt.init_gain)
        # load discriminator only at train time
        self.model_names.append("discriminator")

        # setup GAN loss
        use_smooth = opt.gan_label_mode == "smooth"
        self.criterion_GAN = modules.loss.GANLoss(
            opt.gan_mode, smooth_labels=use_smooth).to(self.device)

        if opt.lambda_discriminator:
            self.loss_names = ["D", "D_real", "D_fake"]
            if any(gp_mode in opt.gan_mode for gp_mode in ["gp", "lp"]):
                self.loss_names += ["D_gp"]
        self.loss_names += ["G"]
        if opt.lambda_gan:
            self.loss_names += ["G_gan"]

        # Define optimizers
        self.optimizer_G = optimizers.define_optimizer(
            self.net_generator.parameters(), opt, "G")
        self.optimizer_D = optimizers.define_optimizer(
            self.net_discriminator.parameters(), opt, "D")
        # FIX: was a tuple ("G", "D"); every other assignment of
        # optimizer_names in this file uses a list, and list ops (+=, append)
        # would fail on a tuple.
        self.optimizer_names = ["G", "D"]

    # TODO: decode visuals for cloth
    self.visual_names = [
        "inputs_decoded", "bodys_unnormalized", "fakes_decoded"
    ]
    if self.is_train:
        # only show targets during training
        self.visual_names.append("targets_decoded")

        # we use cross entropy loss in both modes
        self.criterion_CE = nn.CrossEntropyLoss()
        if opt.warp_mode != "gan":
            # remove discriminator related things if no GAN
            self.model_names = ["generator"]
            # BUG FIX: was `self.loss_names = "G"` (a str). Iterating a str
            # yields characters, and += would concatenate strings; every
            # other loss_names assignment is a list.
            self.loss_names = ["G"]
            del self.net_discriminator
            del self.optimizer_D
            self.optimizer_names = ["G"]
        else:
            self.loss_names += ["G_ce"]
def __init__(self, opt):
    """Initialize the texture GAN model: generator, and (at train time)
    discriminator, GAN/L1/perceptual losses, and optimizers.

    Args:
        opt: experiment options (GAN settings, init scheme, loss weights).
    """
    # NOTE(review): the section below duplicates the BaseModel/BaseGAN
    # initializer inline (the original had `super().__init__(opt)` commented
    # out). Confirm why the inline copy was chosen before replacing it with a
    # super() call.
    self.opt = opt
    self.gpu_id = opt.gpu_id
    self.is_train = opt.is_train
    # get device name: CPU or GPU
    self.device = (torch.device(f"cuda:{self.gpu_id}")
                   if self.gpu_id is not None else torch.device("cpu"))
    # save all the checkpoints to save_dir
    self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
    if self.is_train:
        PromptOnce.makedirs(self.save_dir, not opt.no_confirm)
    self.loss_names = []
    self.model_names = []
    self.visual_names = []
    self.optimizer_names = []  # self.optimizers = []
    self.image_paths = []
    self.metric = 0  # used for learning rate policy 'plateau'

    self.net_generator = self.define_G().to(self.device)
    modules.init_weights(self.net_generator, opt.init_type, opt.init_gain)
    self.model_names = ["generator"]

    if self.is_train:
        # setup discriminator
        self.net_discriminator = discriminators.define_D(
            self.get_D_inchannels(), 64, opt.discriminator, opt.n_layers_D,
            opt.norm).to(self.device)
        modules.init_weights(self.net_discriminator, opt.init_type,
                             opt.init_gain)
        # load discriminator only at train time
        self.model_names.append("discriminator")

        # setup GAN loss
        use_smooth = opt.gan_label_mode == "smooth"
        self.criterion_GAN = modules.loss.GANLoss(
            opt.gan_mode, smooth_labels=use_smooth).to(self.device)

        if opt.lambda_discriminator:
            self.loss_names = ["D", "D_real", "D_fake"]
            if any(gp_mode in opt.gan_mode for gp_mode in ["gp", "lp"]):
                self.loss_names += ["D_gp"]
        self.loss_names += ["G"]
        if opt.lambda_gan:
            self.loss_names += ["G_gan"]

        # Define optimizers
        self.optimizer_G = optimizers.define_optimizer(
            self.net_generator.parameters(), opt, "G")
        self.optimizer_D = optimizers.define_optimizer(
            self.net_discriminator.parameters(), opt, "D")
        # FIX: was a tuple ("G", "D"); every other assignment of
        # optimizer_names in this file uses a list, and list ops (+=, append)
        # would fail on a tuple.
        self.optimizer_names = ["G", "D"]

    # TODO: decode cloth visual
    self.visual_names = [
        "textures_unnormalized",
        "cloths_decoded",
        "fakes",
        "fakes_scaled",
    ]
    if self.is_train:
        # only show targets during training
        self.visual_names.append("targets_unnormalized")

        # Define additional losses for the generator
        self.criterion_L1 = nn.L1Loss().to(self.device)
        # NOTE(review): this uses `modules.losses` while the GAN loss above
        # uses `modules.loss` — verify which module name is correct; left
        # unchanged here.
        self.criterion_perceptual = modules.losses.PerceptualLoss(
            use_style=opt.lambda_style != 0).to(self.device)
        # register only the losses whose weight is nonzero
        for loss in ["l1", "content", "style"]:
            if getattr(opt, "lambda_" + loss) != 0:
                self.loss_names.append(f"G_{loss}")