def initialize(self, opt):
    """Set up the single-generator model: save directory, a constant GPU
    mask, netG_A, and (in training mode) its Adam optimizer and scheduler.

    Args:
        opt: parsed options namespace (gpu_ids, isTrain, result_root_dir,
             variable, variable_value, height, width, norm, init_type,
             continue_train, which_epoch, lr).
    """
    self.opt = opt
    self.gpu_ids = opt.gpu_ids
    self.isTrain = opt.isTrain
    self.Tensor = torch.cuda.FloatTensor
    # Checkpoints live under <result_root_dir>/<variable>/<variable_value>/Net.
    self.save_dir = os.path.join(opt.result_root_dir, opt.variable,
                                 opt.variable_value, 'Net')
    util.mkdirs(self.save_dir)
    # All-ones mask shaped (1, 1, H, W), kept on the GPU for reuse.
    mask_one = torch.ones((opt.height, opt.width))
    self.mask_one = torch.unsqueeze(torch.unsqueeze(mask_one, 0), 0)
    self.mask_one = Variable(self.mask_one).cuda()
    self.netG_A = networks.define_g(1, 1, opt.norm, opt.init_type, self.gpu_ids)
    # Reload weights when testing, or when resuming an interrupted run.
    if not self.isTrain or opt.continue_train:
        which_epoch = opt.which_epoch
        self.load_network(self.netG_A, 'G_A', which_epoch)
    if self.isTrain:
        # Initialize the containers explicitly (as the CycleGAN variant in
        # this file does) so the appends below cannot fail if the base class
        # does not predefine them.
        self.optimizers = []
        self.schedulers = []
        self.optimizer_G = torch.optim.Adam(self.netG_A.parameters(),
                                            lr=opt.lr, betas=(0.5, 0.999))
        self.optimizers.append(self.optimizer_G)
        for optimizer in self.optimizers:
            # NOTE(review): another variant passes opt to get_scheduler; this
            # call relies on a default — confirm networks.get_scheduler's signature.
            self.schedulers.append(networks.get_scheduler(optimizer))
    print('---------- Networks initialized -------------')
    networks.print_network(self.netG_A)
    print('-----------------------------------------------')
def parse(self):
    """Parse command-line options, configure the CUDA devices, persist the
    options to opt.txt, and return the populated namespace."""
    if not self.initialized:
        self.initialize()
    self.opt = self.parser.parse_args()
    self.opt.isTrain = self.isTrain  # train or test
    # Convert the comma-separated gpu_ids string into a list of ints,
    # keeping only valid (non-negative) device indices.
    str_ids = self.opt.gpu_ids.split(',')
    self.opt.gpu_ids = []
    for str_id in str_ids:
        gpu_id = int(str_id)
        if gpu_id >= 0:
            self.opt.gpu_ids.append(gpu_id)
    # Make the first listed GPU the default CUDA device.
    if len(self.opt.gpu_ids) > 0:
        torch.cuda.set_device(self.opt.gpu_ids[0])
    args = vars(self.opt)
    # Save the options to opt.txt for reproducibility.
    expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
    util.mkdirs(expr_dir)
    file_name = os.path.join(expr_dir, 'opt.txt')
    with open(file_name, 'wt') as opt_file:
        opt_file.write('------------ Options -------------\n')
        for k, v in sorted(args.items()):
            opt_file.write('%s: %s\n' % (str(k), str(v)))
        opt_file.write('-------------- End ----------------\n')
    return self.opt
def save_opt(opt):
    """Write every option name/value pair of *opt* to opt.txt in the run dir."""
    expr_dir = os.path.join(opt.result_root_dir, opt.variable, opt.variable_value)
    util.mkdirs(expr_dir)
    out_path = os.path.join(expr_dir, 'opt.txt')
    # Build the whole report first, then flush it in one write.
    lines = ['------------ Options -------------\n']
    for key, value in sorted(vars(opt).items()):
        lines.append('%s: %s\n' % (str(key), str(value)))
    lines.append('-------------- End ----------------\n')
    with open(out_path, 'wt') as opt_file:
        opt_file.writelines(lines)
def initialize(self, opt):
    """Set up the CycleGAN-style model: two generators, two discriminators,
    GAN/cycle/identity losses, optimizers, and LR schedulers.

    Args:
        opt: parsed options namespace (gpu_ids, isTrain, result_root_dir,
             variable, variable_value, norm, no_dropout, init_type,
             continue_train, which_epoch, lr).
    """
    self.opt = opt
    self.gpu_ids = opt.gpu_ids
    self.isTrain = opt.isTrain
    self.Tensor = torch.cuda.FloatTensor
    # Checkpoints live under <result_root_dir>/<variable>/<variable_value>/Net.
    self.save_dir = os.path.join(opt.result_root_dir, opt.variable,
                                 opt.variable_value, 'Net')
    util.mkdirs(self.save_dir)
    # G_A takes an extra (+1) condition channel on top of RGB; G_B is RGB->RGB.
    self.netG_A = networks.define_g(3 + 1, 3, 64, opt.norm, not opt.no_dropout,
                                    opt.init_type, self.gpu_ids)
    self.netG_B = networks.define_g(3, 3, 64, opt.norm, not opt.no_dropout,
                                    opt.init_type, self.gpu_ids)
    if self.isTrain:
        # NOTE(review): the original computed use_sigmoid = opt.no_lsgan but
        # never used it (dead assignment, removed here) — confirm define_d
        # does not need it.
        self.netD_A = networks.define_d(3, 64, opt.norm, opt.init_type, self.gpu_ids)
        self.netD_B = networks.define_d(3, 64, opt.norm, opt.init_type, self.gpu_ids)
    # Reload weights when testing, or when resuming an interrupted run.
    if not self.isTrain or opt.continue_train:
        which_epoch = opt.which_epoch
        self.load_network(self.netG_A, 'G_A', which_epoch)
        self.load_network(self.netG_B, 'G_B', which_epoch)
        if self.isTrain:
            self.load_network(self.netD_A, 'D_A', which_epoch)
            self.load_network(self.netD_B, 'D_B', which_epoch)
    if self.isTrain:
        self.old_lr = opt.lr
        # Adversarial, cycle-consistency and identity losses.
        self.criterionGAN = networks.GANLoss(tensor=self.Tensor)
        self.criterionCycle = torch.nn.L1Loss()
        self.criterionIdt = torch.nn.L1Loss()
        # One optimizer for both generators, one per discriminator.
        self.optimizer_G = torch.optim.Adam(
            itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
            lr=opt.lr, betas=(0.5, 0.999))
        self.optimizer_D_A = torch.optim.Adam(self.netD_A.parameters(),
                                              lr=opt.lr, betas=(0.5, 0.999))
        self.optimizer_D_B = torch.optim.Adam(self.netD_B.parameters(),
                                              lr=opt.lr, betas=(0.5, 0.999))
        self.optimizers = []
        self.schedulers = []
        self.optimizers.append(self.optimizer_G)
        self.optimizers.append(self.optimizer_D_A)
        self.optimizers.append(self.optimizer_D_B)
        for optimizer in self.optimizers:
            self.schedulers.append(networks.get_scheduler(optimizer, opt))
def __init__(self, opt):
    """Visualizer setup: resolves the web/image output directories (HTML
    mode only) and the loss-log path for this run."""
    self.display_id = 1
    self.use_html = opt.isTrain and not opt.no_html
    self.win_size = 256
    self.opt = opt
    self.saved = False
    self.root_dir = os.path.join(opt.result_root_dir, opt.variable)
    # <root>/<variable_value>/<phase> holds everything for this run.
    run_dir = os.path.join(self.root_dir, opt.variable_value, opt.phase)
    if self.use_html:
        self.web_dir = run_dir
        self.img_dir = os.path.join(self.web_dir, 'images')
        util.mkdirs([self.web_dir, self.img_dir])
    self.log_name = os.path.join(run_dir, 'loss_log.txt')
def __init__(self, opt):
    """Visualizer setup for the fixed './checkpoints/experiment_name'
    layout: prepares the web/image dirs (HTML mode only) and appends a
    timestamped session header to the loss log."""
    self.display_id = 1
    self.use_html = opt.isTrain and not opt.no_html
    self.win_size = 256
    self.name = 'experiment_name'
    self.opt = opt
    self.saved = False
    expr_dir = os.path.join('./checkpoints', 'experiment_name')
    if self.use_html:
        self.web_dir = os.path.join(expr_dir, 'web')
        self.img_dir = os.path.join(self.web_dir, 'images')
        util.mkdirs([self.web_dir, self.img_dir])
    self.log_name = os.path.join(expr_dir, 'loss_log.txt')
    # A dated separator keeps successive runs distinguishable in the log.
    with open(self.log_name, "a") as log_file:
        now = time.strftime("%c")
        log_file.write('================ Training Loss (%s) ================\n' % now)
def initialize(self, opt):
    """Set up the masked two-generator/two-discriminator model, its GAN
    loss, optimizers, and LR schedulers.

    Args:
        opt: parsed options namespace (gpu_ids, isTrain, result_root_dir,
             variable, variable_value, height, width, norm, init_type,
             continue_train, which_epoch, lr).
    """
    self.opt = opt
    self.gpu_ids = opt.gpu_ids
    self.isTrain = opt.isTrain
    self.Tensor = torch.cuda.FloatTensor
    # Checkpoints live under <result_root_dir>/<variable>/<variable_value>/Net.
    self.save_dir = os.path.join(opt.result_root_dir, opt.variable,
                                 opt.variable_value, 'Net')
    util.mkdirs(self.save_dir)
    # All-ones mask shaped (1, 1, H, W), kept on the GPU for reuse.
    mask_one = torch.ones((opt.height, opt.width))
    self.mask_one = torch.unsqueeze(torch.unsqueeze(mask_one, 0), 0)
    self.mask_one = Variable(self.mask_one).cuda()
    self.netG_A = networks.define_g_a(3, 1, 1, opt.norm, opt.init_type, self.gpu_ids)
    self.netG_B = networks.define_g_b(1, 3, opt.norm, opt.init_type, self.gpu_ids)
    if self.isTrain:
        # D_A judges 1-channel images; D_B judges 3-channel images.
        self.netD_A = networks.define_d(1, 64, opt.norm, opt.init_type, self.gpu_ids)
        self.netD_B = networks.define_d(3, 64, opt.norm, opt.init_type, self.gpu_ids)
    # Reload weights when testing, or when resuming an interrupted run.
    if not self.isTrain or opt.continue_train:
        which_epoch = opt.which_epoch
        self.load_network(self.netG_A, 'G_A', which_epoch)
        self.load_network(self.netG_B, 'G_B', which_epoch)
        if self.isTrain:
            self.load_network(self.netD_A, 'D_A', which_epoch)
            self.load_network(self.netD_B, 'D_B', which_epoch)
    if self.isTrain:
        # define loss functions
        self.criterionGAN = networks.GANLoss(tensor=self.Tensor)
        # initialize optimizers: one for both generators, one per discriminator
        self.optimizer_G = torch.optim.Adam(
            itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
            lr=opt.lr, betas=(0.5, 0.999))
        self.optimizer_D_A = torch.optim.Adam(self.netD_A.parameters(),
                                              lr=opt.lr, betas=(0.5, 0.999))
        self.optimizer_D_B = torch.optim.Adam(self.netD_B.parameters(),
                                              lr=opt.lr, betas=(0.5, 0.999))
        # Initialize the containers explicitly (as the CycleGAN variant in
        # this file does) so the appends below cannot fail if the base class
        # does not predefine them.
        self.optimizers = []
        self.schedulers = []
        self.optimizers.append(self.optimizer_G)
        self.optimizers.append(self.optimizer_D_A)
        self.optimizers.append(self.optimizer_D_B)
        for optimizer in self.optimizers:
            # NOTE(review): another variant passes opt to get_scheduler; this
            # call relies on a default — confirm networks.get_scheduler's signature.
            self.schedulers.append(networks.get_scheduler(optimizer))