def initialize(self, opt):
    """Store common run options on the model and, when ``opt.eval`` is set,
    build a separate evaluation dataloader.

    Args:
        opt: parsed options namespace. Reads ``gpu_ids``, ``isTrain``,
            ``checkpoints_dir``, ``name`` and ``eval``; when ``eval`` is
            truthy it additionally reads ``eval_batch``.

    Side effects:
        Sets ``self.opt``, ``self.gpu_ids``, ``self.isTrain``,
        ``self.Tensor``, ``self.save_dir`` and (eval only)
        ``self.eval_data_loader`` / ``opt.eval_opt``.
    """
    self.opt = opt
    self.gpu_ids = opt.gpu_ids
    self.isTrain = opt.isTrain
    # Use CUDA tensors only when at least one GPU id was supplied.
    self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
    self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
    if opt.eval:
        # BUG FIX: the original did `opt.eval_opt = opt`, aliasing the eval
        # options to the live options object, so every override below
        # (phase='test', batchSize, no_flip, ...) clobbered the training
        # configuration as well. A shallow copy keeps them independent
        # while preserving the `opt.eval_opt` attribute callers may read.
        import copy
        opt.eval_opt = copy.copy(opt)
        opt.eval_opt.phase = 'test'
        opt.eval_opt.nThreads = 4
        opt.eval_opt.batchSize = opt.eval_batch
        opt.eval_opt.serial_batches = True
        opt.eval_opt.no_flip = True
        opt.eval_opt.dataset_mode = 'aligned'
        self.eval_data_loader = create_dataloader(opt.eval_opt)
        print('#eval images = %d' % len(self.eval_data_loader))
# Test-time entry script: parse test options, build the dataloader/model/
# visualizer, create the HTML results page, and run the model over the
# first `opt.how_many` samples.
#
# FIX 1: `os` was used (os.path.join) but never imported in this script.
# FIX 2: removed a leftover `ipdb.set_trace()` debugger breakpoint (and its
#        third-party import) that halted every run before the test loop.
import os

from options.test_options import TestOptions
from data.data_loader import create_dataloader
from models.models import create_model
from utils.visualizer import Visualizer
from utils import html

opt = TestOptions().parse()
# Hard-coded overrides: the test path only supports single-threaded,
# single-sample, deterministic iteration.
opt.nThreads = 1   # test code only supports nThreads = 1
opt.batchSize = 1  # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True  # no flip
opt.toy_data = False
opt.eval = False
opt.aux = False

data_loader, dataset, dataset_size = create_dataloader(opt)
model = create_model(opt)
visualizer = Visualizer(opt)

# create website
web_dir = os.path.join(opt.results_dir, opt.name,
                       '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(
    web_dir,
    'Experiment = %s, Phase = %s, Epoch = %s' %
    (opt.name, opt.phase, opt.which_epoch))

# test
for i, data in enumerate(dataset):
    if i >= opt.how_many:
        break
    model.set_input(data)
    model.test()
# Test-time setup: parse the options file, create output directories,
# build one dataloader per configured test set, create the model, and
# reset the per-run test log.
options_path = args.options
opt = options.parse(options_path, is_train=False)
# Make all directories needed, skipping the pretrained-G checkpoint entry.
util.mkdirs(p for k, p in opt['path'].items() if k != 'pretrain_model_G')
opt = options.dict2box(opt)

from data.datasets import create_dataset
from data.data_loader import create_dataloader
from models.models import create_model

# Create test dataset and dataloader
test_loaders = []
test_set_names = []
for ds_conf in opt.datasets:
    dset = create_dataset(ds_conf)
    loader = create_dataloader(dset, ds_conf)
    n_images = len(dset)
    label = ds_conf.name
    print('Number of test images in [%s]: %d' % (label, n_images))
    test_loaders.append(loader)
    test_set_names.append(label)

# Create model
model = create_model(opt)

# Path for log file: start each test run with a fresh log.
test_log_path = os.path.join(opt.path.log, 'test_log.txt')
if os.path.exists(test_log_path):
    os.remove(test_log_path)
    print('Old test log is removed.')
def main():
    """Train the (binarized) generator: build train/val datasets and loaders,
    create the model and binarization wrapper, then run the iteration-count
    driven training loop with periodic logging, checkpointing and validation.

    Reads the module-level ``opt`` and sets the module-level ``bin_op``.
    NOTE(review): indentation below is reconstructed from a collapsed
    source line — verify the loop nesting against the original file.
    """
    # Create train dataset
    train_set_opt = opt.datasets[0]
    train_set = create_dataset(train_set_opt)
    # Number of batches per epoch (ceil so a partial final batch counts).
    train_size = int(math.ceil(len(train_set) / train_set_opt.batch_size))
    print('Number of train images: %d batches of size %d' % (train_size, train_set_opt.batch_size))
    # Training is bounded by total iterations; derive the epoch count from it.
    total_iters = int(opt.train.niter)
    total_epoches = int(math.ceil(total_iters / train_size))
    print('Total epoches needed: %d' % total_epoches)
    # Create val dataset
    val_set_opt = opt.datasets[1]
    val_set = create_dataset(val_set_opt)
    val_size = len(val_set)
    print('Number of val images: %d' % val_size)
    # Create dataloader
    train_loader = create_dataloader(train_set, train_set_opt)
    val_loader = create_dataloader(val_set, val_set_opt)
    # Create model
    model = create_model(opt)
    # Create binarization module (wraps the generator's weights).
    import bin
    global bin_op
    bin_op = bin.BinOp(model.netG)
    model.train()
    # Create logger
    logger = Logger(opt)
    current_step = 0
    need_make_val_dir = True  # NOTE(review): unused in this view; presumably consumed by validate()
    start_time = time.time()
    for epoch in range(total_epoches):
        for i, train_data in enumerate(train_loader):
            current_step += 1
            if current_step > total_iters:
                break
            train_start_time = time.time()
            # Training
            model.feed_data(train_data)
            # The optimize_parameters function is split into its individual
            # steps and replaced here, for binarization:
            # model.optimize_parameters(current_step)
            # Order matters: binarize weights -> forward/backward on the
            # binarized weights -> restore full-precision weights -> rescale
            # gradients -> take the optimizer step.
            bin_op.binarization()
            model.forward_G()
            model.optimizer_G.zero_grad()
            model.backward_G()
            bin_op.restore()
            bin_op.updateBinaryGradWeight()
            model.optimizer_G.step()
            train_duration = time.time() - train_start_time
            # Periodic loss logging.
            if current_step % opt.logger.print_freq == 0:
                losses = model.get_current_losses()
                logger.print_results(losses, epoch, current_step, train_duration, 'loss')
            # Periodic checkpointing.
            if current_step % opt.logger.save_checkpoint_freq == 0:
                print('Saving the model at the end of current_step %d' % (current_step))
                model.save(current_step)
            # Validation
            if current_step % opt.train.val_freq == 0:
                validate(val_loader, val_size, model, logger, epoch, current_step)
            # Step-based LR scheduling (runs every iteration).
            model.update_learning_rate(step=current_step, scheme=opt.train.lr_scheme)
        print('End of Epoch %d' % epoch)
    print('Saving the final model')
    model.save('latest')
    print('End of Training \t Time Taken: %d sec' % (time.time() - start_time))